[llvm-branch-commits] [llvm] afd03cd - [RISCV] Define vector single-width reduction intrinsic.
Monk Chiang via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Thu Dec 24 18:01:45 PST 2020
Author: Monk Chiang
Date: 2020-12-25T09:56:01+08:00
New Revision: afd03cd33582be4931cf7957354e583bcfe8d99e
URL: https://github.com/llvm/llvm-project/commit/afd03cd33582be4931cf7957354e583bcfe8d99e
DIFF: https://github.com/llvm/llvm-project/commit/afd03cd33582be4931cf7957354e583bcfe8d99e.diff
LOG: [RISCV] Define vector single-width reduction intrinsic.
integer group:
vredsum/vredmaxu/vredmax/vredminu/vredmin/vredand/vredor/vredxor
float group:
vfredosum/vfredsum/vfredmax/vfredmin
We worked with @rogfer01 from BSC to come up with this patch.
Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen at sifive.com>
Differential Revision: https://reviews.llvm.org/D93746
Added:
llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 37e463dedfcb..e6af2374a500 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -352,6 +352,23 @@ let TargetPrefix = "riscv" in {
[IntrNoMem]>, RISCVVIntrinsic {
let ExtendOperand = 2;
}
+ // For reduction ternary operations.
+ // The destination vector type is the same as the first and third source vector.
+ // Input: (vector_in, vector_in, vector_in, vl)
+ class RISCVReductionNoMask
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
+ llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic;
+ // For reduction ternary operations with mask.
+ // The destination vector type is the same as the first and third source vector.
+ // The mask type comes from the second source vector.
+ // Input: (maskedoff, vector_in, vector_in, vector_in, mask, vl)
+ class RISCVReductionMask
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic;
multiclass RISCVUSLoad {
def "int_riscv_" # NAME : RISCVUSLoad;
@@ -424,6 +441,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
}
+ multiclass RISCVReduction {
+ def "int_riscv_" # NAME : RISCVReductionNoMask;
+ def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
+ }
defm vle : RISCVUSLoad;
defm vleff : RISCVUSLoad;
@@ -606,4 +627,18 @@ let TargetPrefix = "riscv" in {
defm vmfle : RISCVCompare;
defm vmfgt : RISCVCompare;
defm vmfge : RISCVCompare;
+
+ defm vredsum : RISCVReduction;
+ defm vredand : RISCVReduction;
+ defm vredor : RISCVReduction;
+ defm vredxor : RISCVReduction;
+ defm vredminu : RISCVReduction;
+ defm vredmin : RISCVReduction;
+ defm vredmaxu : RISCVReduction;
+ defm vredmax : RISCVReduction;
+
+ defm vfredosum : RISCVReduction;
+ defm vfredsum : RISCVReduction;
+ defm vfredmin : RISCVReduction;
+ defm vfredmax : RISCVReduction;
} // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 99c9e446641a..ef1003dee7af 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -110,32 +110,35 @@ class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
defset list<VTypeInfo> AllVectors = {
defset list<VTypeInfo> AllIntegerVectors = {
- def VI8MF8: VTypeInfo<vint8mf8_t, vbool64_t, 8, VR, V_MF8>;
- def VI8MF4: VTypeInfo<vint8mf4_t, vbool32_t, 8, VR, V_MF4>;
- def VI8MF2: VTypeInfo<vint8mf2_t, vbool16_t, 8, VR, V_MF2>;
- def VI8M1: VTypeInfo<vint8m1_t, vbool8_t, 8, VR, V_M1>;
- def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
- def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
- def VI16M1: VTypeInfo<vint16m1_t, vbool16_t, 16, VR, V_M1>;
- def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
- def VI32M1: VTypeInfo<vint32m1_t, vbool32_t, 32, VR, V_M1>;
- def VI64M1: VTypeInfo<vint64m1_t, vbool64_t, 64, VR, V_M1>;
-
- def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
- def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
- def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;
-
- def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
- def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
- def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;
-
- def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
- def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
- def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;
-
- def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
- def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
- def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
+ defset list<VTypeInfo> NoGroupIntegerVectors = {
+ def VI8MF8: VTypeInfo<vint8mf8_t, vbool64_t, 8, VR, V_MF8>;
+ def VI8MF4: VTypeInfo<vint8mf4_t, vbool32_t, 8, VR, V_MF4>;
+ def VI8MF2: VTypeInfo<vint8mf2_t, vbool16_t, 8, VR, V_MF2>;
+ def VI8M1: VTypeInfo<vint8m1_t, vbool8_t, 8, VR, V_M1>;
+ def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
+ def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
+ def VI16M1: VTypeInfo<vint16m1_t, vbool16_t, 16, VR, V_M1>;
+ def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
+ def VI32M1: VTypeInfo<vint32m1_t, vbool32_t, 32, VR, V_M1>;
+ def VI64M1: VTypeInfo<vint64m1_t, vbool64_t, 64, VR, V_M1>;
+ }
+ defset list<GroupVTypeInfo> GroupIntegerVectors = {
+ def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
+ def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
+ def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;
+
+ def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
+ def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
+ def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;
+
+ def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
+ def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
+ def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;
+
+ def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
+ def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
+ def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
+ }
}
defset list<VTypeInfo> AllFloatVectors = {
@@ -949,6 +952,11 @@ multiclass VPseudoBinaryM_VX_VI {
defm "" : VPseudoBinaryM_VI;
}
+multiclass VPseudoReductionV_VS {
+ foreach m = MxList.m in
+ defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>;
+}
+
//===----------------------------------------------------------------------===//
// Helpers to define the intrinsic patterns.
//===----------------------------------------------------------------------===//
@@ -1636,6 +1644,26 @@ multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
defm "" : VPatBinaryM_VI<intrinsic, instruction, vtilist>;
}
+multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
+ foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in
+ {
+ defvar vectorM1 = !cast<VTypeInfo>(!if(IsFloat, "VF", "VI") # vti.SEW # "M1");
+ defm : VPatTernary<intrinsic, instruction, "VS",
+ vectorM1.Vector, vti.Vector,
+ vectorM1.Vector, vti.Mask,
+ vti.SEW, vti.LMul,
+ VR, vti.RegClass, VR>;
+ }
+ foreach gvti = !if(IsFloat, GroupFloatVectors, GroupIntegerVectors) in
+ {
+ defm : VPatTernary<intrinsic, instruction, "VS",
+ gvti.VectorM1, gvti.Vector,
+ gvti.VectorM1, gvti.Mask,
+ gvti.SEW, gvti.LMul,
+ VR, gvti.RegClass, VR>;
+ }
+}
+
//===----------------------------------------------------------------------===//
// Pseudo instructions and patterns.
//===----------------------------------------------------------------------===//
@@ -1963,6 +1991,30 @@ defm PseudoVFMERGE : VPseudoBinaryV_XM</*CarryOut =*/0,/*CarryIn =*/true,
/*Constraint =*/"", /*IsFloat=*/true>;
} // Predicates = [HasStdExtV, HasStdExtF]
+let Predicates = [HasStdExtV] in {
+//===----------------------------------------------------------------------===//
+// 15.1. Vector Single-Width Integer Reduction Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVREDSUM : VPseudoReductionV_VS;
+defm PseudoVREDAND : VPseudoReductionV_VS;
+defm PseudoVREDOR : VPseudoReductionV_VS;
+defm PseudoVREDXOR : VPseudoReductionV_VS;
+defm PseudoVREDMINU : VPseudoReductionV_VS;
+defm PseudoVREDMIN : VPseudoReductionV_VS;
+defm PseudoVREDMAXU : VPseudoReductionV_VS;
+defm PseudoVREDMAX : VPseudoReductionV_VS;
+} // Predicates = [HasStdExtV]
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+//===----------------------------------------------------------------------===//
+// 15.3. Vector Single-Width Floating-Point Reduction Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVFREDOSUM : VPseudoReductionV_VS;
+defm PseudoVFREDSUM : VPseudoReductionV_VS;
+defm PseudoVFREDMIN : VPseudoReductionV_VS;
+defm PseudoVFREDMAX : VPseudoReductionV_VS;
+} // Predicates = [HasStdExtV, HasStdExtF]
+
//===----------------------------------------------------------------------===//
// 17.1. Integer Scalar Move Instructions
//===----------------------------------------------------------------------===//
@@ -2393,6 +2445,30 @@ defm "" : VPatBinaryV_XM<"int_riscv_vfmerge", "PseudoVFMERGE",
} // Predicates = [HasStdExtV, HasStdExtF]
+let Predicates = [HasStdExtV] in {
+//===----------------------------------------------------------------------===//
+// 15.1. Vector Single-Width Integer Reduction Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatReductionV_VS<"int_riscv_vredsum", "PseudoVREDSUM">;
+defm "" : VPatReductionV_VS<"int_riscv_vredand", "PseudoVREDAND">;
+defm "" : VPatReductionV_VS<"int_riscv_vredor", "PseudoVREDOR">;
+defm "" : VPatReductionV_VS<"int_riscv_vredxor", "PseudoVREDXOR">;
+defm "" : VPatReductionV_VS<"int_riscv_vredminu", "PseudoVREDMINU">;
+defm "" : VPatReductionV_VS<"int_riscv_vredmin", "PseudoVREDMIN">;
+defm "" : VPatReductionV_VS<"int_riscv_vredmaxu", "PseudoVREDMAXU">;
+defm "" : VPatReductionV_VS<"int_riscv_vredmax", "PseudoVREDMAX">;
+} // Predicates = [HasStdExtV]
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+//===----------------------------------------------------------------------===//
+// 15.3. Vector Single-Width Floating-Point Reduction Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatReductionV_VS<"int_riscv_vfredosum", "PseudoVFREDOSUM", /*IsFloat=*/1>;
+defm "" : VPatReductionV_VS<"int_riscv_vfredsum", "PseudoVFREDSUM", /*IsFloat=*/1>;
+defm "" : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", /*IsFloat=*/1>;
+defm "" : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>;
+} // Predicates = [HasStdExtV, HasStdExtF]
+
//===----------------------------------------------------------------------===//
// 17. Vector Permutation Instructions
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
new file mode 100644
index 000000000000..6d2fa56686f8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
@@ -0,0 +1,463 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
new file mode 100644
index 000000000000..2977a633a4a3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
@@ -0,0 +1,631 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
+ <vscale x 1 x double>,
+ <vscale x 2 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
+ <vscale x 1 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
+ <vscale x 1 x double>,
+ <vscale x 2 x double>,
+ <vscale x 1 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
+ <vscale x 1 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
+ <vscale x 1 x double>,
+ <vscale x 4 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
+ <vscale x 1 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
+ <vscale x 1 x double>,
+ <vscale x 4 x double>,
+ <vscale x 1 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
+ <vscale x 1 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
+ <vscale x 1 x double>,
+ <vscale x 8 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
+ <vscale x 1 x double> %0,
+ <vscale x 8 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
+ <vscale x 1 x double>,
+ <vscale x 8 x double>,
+ <vscale x 1 x double>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
+ <vscale x 1 x double> %0,
+ <vscale x 8 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
new file mode 100644
index 000000000000..fefa2e5de06a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
@@ -0,0 +1,463 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
new file mode 100644
index 000000000000..60da063f5698
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
@@ -0,0 +1,631 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
+ <vscale x 1 x double>,
+ <vscale x 2 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64(
+ <vscale x 1 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
+ <vscale x 1 x double>,
+ <vscale x 2 x double>,
+ <vscale x 1 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
+ <vscale x 1 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
+ <vscale x 1 x double>,
+ <vscale x 4 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64(
+ <vscale x 1 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
+ <vscale x 1 x double>,
+ <vscale x 4 x double>,
+ <vscale x 1 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
+ <vscale x 1 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
+ <vscale x 1 x double>,
+ <vscale x 8 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64(
+ <vscale x 1 x double> %0,
+ <vscale x 8 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
+ <vscale x 1 x double>,
+ <vscale x 8 x double>,
+ <vscale x 1 x double>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
+ <vscale x 1 x double> %0,
+ <vscale x 8 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
new file mode 100644
index 000000000000..e50e92a6bd20
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
@@ -0,0 +1,463 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
new file mode 100644
index 000000000000..948ad9f18da1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
@@ -0,0 +1,631 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
+ <vscale x 1 x double>,
+ <vscale x 2 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64(
+ <vscale x 1 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
+ <vscale x 1 x double>,
+ <vscale x 2 x double>,
+ <vscale x 1 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
+ <vscale x 1 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
+ <vscale x 1 x double>,
+ <vscale x 4 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64(
+ <vscale x 1 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
+ <vscale x 1 x double>,
+ <vscale x 4 x double>,
+ <vscale x 1 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
+ <vscale x 1 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
+ <vscale x 1 x double>,
+ <vscale x 8 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64(
+ <vscale x 1 x double> %0,
+ <vscale x 8 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
+ <vscale x 1 x double>,
+ <vscale x 8 x double>,
+ <vscale x 1 x double>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
+ <vscale x 1 x double> %0,
+ <vscale x 8 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
new file mode 100644
index 000000000000..87393c291ca8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
@@ -0,0 +1,463 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
new file mode 100644
index 000000000000..7000191c9a64
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
@@ -0,0 +1,631 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half>,
+ <vscale x 1 x half>,
+ <vscale x 4 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
+ <vscale x 4 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half>,
+ <vscale x 2 x half>,
+ <vscale x 4 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
+ <vscale x 4 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half>,
+ <vscale x 8 x half>,
+ <vscale x 4 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
+ <vscale x 4 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half>,
+ <vscale x 16 x half>,
+ <vscale x 4 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
+ <vscale x 4 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half>,
+ <vscale x 32 x half>,
+ <vscale x 4 x half>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
+ <vscale x 4 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 4 x half> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float>,
+ <vscale x 1 x float>,
+ <vscale x 2 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
+ <vscale x 2 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float>,
+ <vscale x 4 x float>,
+ <vscale x 2 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
+ <vscale x 2 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float>,
+ <vscale x 8 x float>,
+ <vscale x 2 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
+ <vscale x 2 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float>,
+ <vscale x 16 x float>,
+ <vscale x 2 x float>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
+ <vscale x 2 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 2 x float> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
+ <vscale x 1 x double>,
+ <vscale x 2 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
+ <vscale x 1 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
+ <vscale x 1 x double>,
+ <vscale x 2 x double>,
+ <vscale x 1 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
+ <vscale x 1 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
+ <vscale x 1 x double>,
+ <vscale x 4 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
+ <vscale x 1 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
+ <vscale x 1 x double>,
+ <vscale x 4 x double>,
+ <vscale x 1 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
+ <vscale x 1 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
+ <vscale x 1 x double>,
+ <vscale x 8 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
+ <vscale x 1 x double> %0,
+ <vscale x 8 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
+ <vscale x 1 x double>,
+ <vscale x 8 x double>,
+ <vscale x 1 x double>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
+ <vscale x 1 x double> %0,
+ <vscale x 8 x double> %1,
+ <vscale x 1 x double> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
new file mode 100644
index 000000000000..305efe880361
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
@@ -0,0 +1,715 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
new file mode 100644
index 000000000000..eeeeb9242467
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
@@ -0,0 +1,883 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
new file mode 100644
index 000000000000..5a2c7ab416c7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
@@ -0,0 +1,715 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
new file mode 100644
index 000000000000..939ae4599031
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
@@ -0,0 +1,883 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
new file mode 100644
index 000000000000..734fc33ba4fd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
@@ -0,0 +1,715 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
new file mode 100644
index 000000000000..68af5ac3897a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
@@ -0,0 +1,883 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
new file mode 100644
index 000000000000..6da1738afe20
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
@@ -0,0 +1,715 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
new file mode 100644
index 000000000000..6c6bc85ca159
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
@@ -0,0 +1,883 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
new file mode 100644
index 000000000000..9f26f5dea491
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
@@ -0,0 +1,715 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
new file mode 100644
index 000000000000..559a870c8354
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
@@ -0,0 +1,883 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
new file mode 100644
index 000000000000..5b0e2f099675
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
@@ -0,0 +1,715 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
new file mode 100644
index 000000000000..9b8022081411
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
@@ -0,0 +1,883 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
new file mode 100644
index 000000000000..b2b95979526d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
@@ -0,0 +1,715 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
new file mode 100644
index 000000000000..7641b6ca0a4d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
@@ -0,0 +1,883 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
new file mode 100644
index 000000000000..3ea6a2532085
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
@@ -0,0 +1,715 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
new file mode 100644
index 000000000000..b61705eb8159
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
@@ -0,0 +1,883 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+ <vscale x 8 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+ <vscale x 4 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+ <vscale x 2 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.nxv1i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.nxv2i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.nxv4i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.nxv8i1(
+ <vscale x 1 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
More information about the llvm-branch-commits
mailing list