[clang] c680b0d - [RISCV][Clang] Add all RVV Reduction intrinsic functions.

Zakk Chen via cfe-commits cfe-commits at lists.llvm.org
Sun Apr 11 19:30:29 PDT 2021


Author: Zakk Chen
Date: 2021-04-11T19:19:01-07:00
New Revision: c680b0dabf367046c9acb397cddc5e3a2194bbaa

URL: https://github.com/llvm/llvm-project/commit/c680b0dabf367046c9acb397cddc5e3a2194bbaa
DIFF: https://github.com/llvm/llvm-project/commit/c680b0dabf367046c9acb397cddc5e3a2194bbaa.diff

LOG: [RISCV][Clang] Add all RVV Reduction intrinsic functions.

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen at sifive.com>

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D99964
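
For reference, a minimal sketch of how the new overloaded intrinsics are used, mirroring the added tests (the reduce_max wrapper name is illustrative; it assumes <riscv_vector.h> and the experimental V target features the tests enable):

    #include <riscv_vector.h>

    // Follows the pattern of the added test test_vfredmax_vs_f32m2_f32m1:
    // the overloaded vfredmax() call on an f32m2 source with an f32m1
    // destination lowers to @llvm.riscv.vfredmax.nxv2f32.nxv4f32.* per
    // the CHECK lines below.
    vfloat32m1_t reduce_max(vfloat32m1_t dst, vfloat32m2_t vector,
                            vfloat32m1_t scalar, size_t vl) {
      return vfredmax(dst, vector, scalar, vl);
    }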

Added: 
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c

Modified: 
    clang/include/clang/Basic/riscv_vector.td

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 749ffb180692..4497d37cfac9 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -404,6 +404,29 @@ class RVVConvToNarrowingSignedBuiltin<string mangled_name>
 class RVVConvToNarrowingUnsignedBuiltin<string mangled_name>
     : RVVConvBuiltin<"Uv", "UvFw", "si", mangled_name>;
 
+let HasMaskedOffOperand = false in {
+  multiclass RVVSignedReductionBuiltin {
+    defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+                                  [["vs", "vSv", "SvSvvSv"]]>;
+  }
+  multiclass RVVUnsignedReductionBuiltin {
+    defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+                                  [["vs", "UvUSv", "USvUSvUvUSv"]]>;
+  }
+  multiclass RVVFloatingReductionBuiltin {
+    defm "" : RVVOutOp1BuiltinSet<NAME, "fd",
+                                  [["vs", "vSv", "SvSvvSv"]]>;
+  }
+  multiclass RVVFloatingWidenReductionBuiltin {
+    defm "" : RVVOutOp1BuiltinSet<NAME, "f",
+                                  [["vs", "vSw", "SwSwvSw"]]>;
+  }
+}
+
+multiclass RVVIntReductionBuiltinSet
+    : RVVSignedReductionBuiltin,
+      RVVUnsignedReductionBuiltin;
+
 // For widen operation which has different mangling name.
 multiclass RVVWidenBuiltinSet<string intrinsic_name, string type_range,
                               list<list<string>> suffixes_prototypes> {
@@ -961,3 +984,33 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
   def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "f", "vfncvt_f">;
   def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "f", "vfncvt_rod_f">;
 }
+
+// 15. Vector Reduction Operations
+// 15.1. Vector Single-Width Integer Reduction Instructions
+defm vredsum : RVVIntReductionBuiltinSet;
+defm vredmaxu : RVVUnsignedReductionBuiltin;
+defm vredmax : RVVSignedReductionBuiltin;
+defm vredminu : RVVUnsignedReductionBuiltin;
+defm vredmin : RVVSignedReductionBuiltin;
+defm vredand : RVVIntReductionBuiltinSet;
+defm vredor : RVVIntReductionBuiltinSet;
+defm vredxor : RVVIntReductionBuiltinSet;
+
+// 15.2. Vector Widening Integer Reduction Instructions
+// Vector Widening Integer Reduction Operations
+let HasMaskedOffOperand = false in {
+  defm vwredsum : RVVOutOp1BuiltinSet<"vwredsum", "csi",
+                                      [["vs", "vSw", "SwSwvSw"]]>;
+  defm vwredsumu : RVVOutOp1BuiltinSet<"vwredsumu", "csi",
+                                       [["vs", "UvUSw", "USwUSwUvUSw"]]>;
+}
+
+// 15.3. Vector Single-Width Floating-Point Reduction Instructions
+defm vfredmax : RVVFloatingReductionBuiltin;
+defm vfredmin : RVVFloatingReductionBuiltin;
+defm vfredsum : RVVFloatingReductionBuiltin;
+defm vfredosum : RVVFloatingReductionBuiltin;
+
+// 15.4. Vector Widening Floating-Point Reduction Instructions
+defm vfwredsum : RVVFloatingWidenReductionBuiltin;
+defm vfwredosum : RVVFloatingWidenReductionBuiltin;

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
new file mode 100644
index 000000000000..c852f998b8ab
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmax.c
@@ -0,0 +1,291 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst,
+                                           vfloat32mf2_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+                                             vfloat32mf2_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+                                            vfloat32m1_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+                                            vfloat32m2_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+                                            vfloat32m4_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+                                            vfloat32m8_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                            vfloat64m1_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                            vfloat64m2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                            vfloat64m4_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                            vfloat64m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
new file mode 100644
index 000000000000..daf7cd77a4b4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredmin.c
@@ -0,0 +1,291 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst,
+                                           vfloat32mf2_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+                                             vfloat32mf2_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+                                            vfloat32m1_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+                                            vfloat32m2_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+                                            vfloat32m4_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+                                            vfloat32m8_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                            vfloat64m1_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                            vfloat64m2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                            vfloat64m4_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                            vfloat64m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
new file mode 100644
index 000000000000..5d032716bc29
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
@@ -0,0 +1,579 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+                                           vfloat32mf2_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+                                             vfloat32mf2_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+                                            vfloat32m1_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+                                            vfloat32m2_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+                                            vfloat32m4_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+                                            vfloat32m8_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                            vfloat64m1_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                            vfloat64m2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                            vfloat64m4_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                            vfloat64m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+                                            vfloat32mf2_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst,
+                                           vfloat32m1_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst,
+                                           vfloat32m2_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst,
+                                           vfloat32m4_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst,
+                                           vfloat32m8_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst,
+                                           vfloat64m1_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst,
+                                           vfloat64m2_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst,
+                                           vfloat64m4_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst,
+                                           vfloat64m8_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+                                              vfloat32mf2_t vector,
+                                              vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+                                             vfloat32m1_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+                                             vfloat32m2_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+                                             vfloat32m4_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+                                             vfloat32m8_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                             vfloat64m1_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                             vfloat64m2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                             vfloat64m4_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                             vfloat64m8_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
new file mode 100644
index 000000000000..e1339bf25124
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -0,0 +1,331 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+                                            vfloat32mf2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
+                                           vfloat32m1_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
+                                           vfloat32m2_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
+                                           vfloat32m4_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
+                                           vfloat32m8_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                              vfloat32mf2_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                             vfloat32m1_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                             vfloat32m2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                             vfloat32m4_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+                                             vfloat32m8_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+                                             vfloat32mf2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst,
+                                            vfloat32m1_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst,
+                                            vfloat32m2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst,
+                                            vfloat32m4_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
+                                            vfloat32m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                               vfloat32mf2_t vector,
+                                               vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                              vfloat32m1_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                              vfloat32m2_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                              vfloat32m4_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+                                              vfloat32m8_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
new file mode 100644
index 000000000000..17b893e694b9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredand.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredand(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                       vint8mf8_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                       vint8mf4_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                       vint8mf2_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                      vint8m1_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                      vint8m2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                      vint8m4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                      vint8m8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint16mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint16mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                         vint16m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint16m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint16m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint16m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                          vint32mf2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                         vint32m1_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                         vint32m2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                         vint32m4_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                         vint32m8_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                         vint64m1_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                         vint64m2_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                         vint64m4_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                         vint64m8_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                        vuint8mf8_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                        vuint8mf4_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                        vuint8mf2_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                       vuint8m1_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                       vuint8m2_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                       vuint8m4_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                       vuint8m8_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                           vuint16mf4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                           vuint16mf2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                          vuint16m1_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                          vuint16m2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                          vuint16m4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                          vuint16m8_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                           vuint32mf2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                          vuint32m1_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                          vuint32m2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                          vuint32m4_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                          vuint32m8_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                          vuint64m1_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                          vuint64m2_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                          vuint64m4_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                          vuint64m8_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredand(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
new file mode 100644
index 000000000000..e6b215d5c31f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmax.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmax(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                       vint8mf8_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                       vint8mf4_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                       vint8mf2_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                      vint8m1_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                      vint8m2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                      vint8m4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                      vint8m8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint16mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint16mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                         vint16m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint16m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint16m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint16m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                          vint32mf2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                         vint32m1_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                         vint32m2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                         vint32m4_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                         vint32m8_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                         vint64m1_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                         vint64m2_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                         vint64m4_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                         vint64m8_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmax(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                         vuint8mf8_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                         vuint8mf4_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                         vuint8mf2_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                        vuint8m1_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                        vuint8m2_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                        vuint8m4_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                        vuint8m8_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                            vuint16mf4_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                            vuint16mf2_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                           vuint16m1_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                           vuint16m2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                           vuint16m4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                           vuint16m8_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                            vuint32mf2_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                           vuint32m1_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                           vuint32m2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                           vuint32m4_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                           vuint32m8_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                           vuint64m1_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                           vuint64m2_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                           vuint64m4_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                           vuint64m8_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
new file mode 100644
index 000000000000..cefff41a13b6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredmin.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmin(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredminu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                       vint8mf8_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                       vint8mf4_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                       vint8mf2_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                      vint8m1_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                      vint8m2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                      vint8m4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                      vint8m8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint16mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint16mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                         vint16m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint16m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint16m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint16m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                          vint32mf2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                         vint32m1_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                         vint32m2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                         vint32m4_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                         vint32m8_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                         vint64m1_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                         vint64m2_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                         vint64m4_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                         vint64m8_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmin(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                         vuint8mf8_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                         vuint8mf4_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                         vuint8mf2_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                        vuint8m1_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                        vuint8m2_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                        vuint8m4_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                        vuint8m8_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                            vuint16mf4_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                            vuint16mf2_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                           vuint16m1_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                           vuint16m2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                           vuint16m4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                           vuint16m8_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                            vuint32mf2_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                           vuint32m1_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                           vuint32m2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                           vuint32m4_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                           vuint32m8_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                           vuint64m1_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                           vuint64m2_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                           vuint64m4_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                           vuint64m8_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredminu(mask, dst, vector, scalar, vl);
+}

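For reference, a minimal usage sketch of the masked reduction form exercised by the tests above. The wrapper name umin_active is hypothetical; only the overloaded vredminu call shape shown in the tests is assumed.

#include <riscv_vector.h>
#include <stddef.h>

// Hypothetical wrapper (not part of this patch): unsigned-min reduction over
// the active elements of `vector` only. Element 0 of `seed` supplies the
// start value of the reduction; `dst` is passed through as the merge operand,
// exactly as in the tests above.
static inline vuint32m1_t umin_active(vbool4_t mask, vuint32m1_t dst,
                                      vuint32m8_t vector, vuint32m1_t seed,
                                      size_t vl) {
  return vredminu(mask, dst, vector, seed, vl);
}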
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
new file mode 100644
index 000000000000..034b7ef3eb4c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredor.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                   vint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                   vint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                   vint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                   vint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                      vint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                      vint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                      vint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                      vint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                      vint32m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                      vint32m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                      vint32m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                      vint32m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                      vint64m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                      vint64m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                      vint64m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                      vint64m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                    vuint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                    vuint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                    vuint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                    vuint8m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                       vuint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                       vuint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                       vuint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                       vuint16m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                       vuint32m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                       vuint32m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                       vuint32m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                       vuint32m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                       vuint64m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                       vuint64m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                       vuint64m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                       vuint64m1_t scalar, size_t vl) {
+  return vredor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                      vint8mf8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                      vint8mf4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                      vint8mf2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                     vint8m1_t vector, vint8m1_t scalar,
+                                     size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                     vint8m2_t vector, vint8m1_t scalar,
+                                     size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                     vint8m4_t vector, vint8m1_t scalar,
+                                     size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                     vint8m8_t vector, vint8m1_t scalar,
+                                     size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                         vint16mf4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                         vint16mf2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                        vint16m1_t vector, vint16m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                        vint16m2_t vector, vint16m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                        vint16m4_t vector, vint16m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                        vint16m8_t vector, vint16m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                         vint32mf2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                        vint32m1_t vector, vint32m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                        vint32m2_t vector, vint32m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                        vint32m4_t vector, vint32m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                        vint32m8_t vector, vint32m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                        vint64m1_t vector, vint64m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                        vint64m2_t vector, vint64m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                        vint64m4_t vector, vint64m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                        vint64m8_t vector, vint64m1_t scalar,
+                                        size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                       vuint8mf8_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                       vuint8mf4_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                       vuint8mf2_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                      vuint8m1_t vector, vuint8m1_t scalar,
+                                      size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                      vuint8m2_t vector, vuint8m1_t scalar,
+                                      size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                      vuint8m4_t vector, vuint8m1_t scalar,
+                                      size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                      vuint8m8_t vector, vuint8m1_t scalar,
+                                      size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                          vuint16mf4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                          vuint16mf2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                         vuint16m1_t vector, vuint16m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                         vuint16m2_t vector, vuint16m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                         vuint16m4_t vector, vuint16m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                         vuint16m8_t vector, vuint16m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                          vuint32mf2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                         vuint32m1_t vector, vuint32m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                         vuint32m2_t vector, vuint32m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                         vuint32m4_t vector, vuint32m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                         vuint32m8_t vector, vuint32m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                         vuint64m1_t vector, vuint64m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                         vuint64m2_t vector, vuint64m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                         vuint64m4_t vector, vuint64m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                         vuint64m8_t vector, vuint64m1_t scalar,
+                                         size_t vl) {
+  return vredor(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
new file mode 100644
index 000000000000..6fdadc8c78f0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredsum.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                       vint8mf8_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                       vint8mf4_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                       vint8mf2_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                      vint8m1_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                      vint8m2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                      vint8m4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                      vint8m8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint16mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint16mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                         vint16m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint16m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint16m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint16m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                          vint32mf2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                         vint32m1_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                         vint32m2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                         vint32m4_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                         vint32m8_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                         vint64m1_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                         vint64m2_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                         vint64m4_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                         vint64m8_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                        vuint8mf8_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                        vuint8mf4_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                        vuint8mf2_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                       vuint8m1_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                       vuint8m2_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                       vuint8m4_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                       vuint8m8_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                           vuint16mf4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                           vuint16mf2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                          vuint16m1_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                          vuint16m2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                          vuint16m4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                          vuint16m8_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                           vuint32mf2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                          vuint32m1_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                          vuint32m2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                          vuint32m4_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                          vuint32m8_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                          vuint64m1_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                          vuint64m2_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                          vuint64m4_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                          vuint64m8_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredsum(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
new file mode 100644
index 000000000000..3c9729ab3994
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vredxor.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredxor(dst, vector, scalar, vl);
+}
+
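+// The tests below exercise the masked overloads of vredxor: the mask is
+// passed as the leading argument and is forwarded to the corresponding
+// @llvm.riscv.vredxor.mask.* intrinsic.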
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                       vint8mf8_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                       vint8mf4_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                       vint8mf2_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                      vint8m1_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                      vint8m2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                      vint8m4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                      vint8m8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint16mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint16mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                         vint16m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint16m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint16m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint16m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                          vint32mf2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                         vint32m1_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                         vint32m2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                         vint32m4_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                         vint32m8_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                         vint64m1_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                         vint64m2_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                         vint64m4_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                         vint64m8_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                        vuint8mf8_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                        vuint8mf4_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                        vuint8mf2_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                       vuint8m1_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                       vuint8m2_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                       vuint8m4_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                       vuint8m8_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                           vuint16mf4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                           vuint16mf2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                          vuint16m1_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                          vuint16m2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                          vuint16m4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                          vuint16m8_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                           vuint32mf2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                          vuint32m1_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                          vuint32m2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                          vuint32m4_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                          vuint32m8_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                          vuint64m1_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                          vuint64m2_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                          vuint64m4_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                          vuint64m8_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredxor(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
new file mode 100644
index 000000000000..ef6d793ade34
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwredsum.c
@@ -0,0 +1,1124 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector,
+                                         vint32m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector,
+                                         vint32m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector,
+                                         vint64m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector,
+                                        vint64m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector,
+                                        vint64m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector,
+                                        vint64m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector,
+                                        vint64m1_t scalar, size_t vl) {
+  return vwredsum(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint8mf8_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint8mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                          vint8mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint8m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint8m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint8m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst,
+                                         vint8m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                           vint16mf4_t vector,
+                                           vint32m1_t scalar, size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                           vint16mf2_t vector,
+                                           vint32m1_t scalar, size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                          vint16m1_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                          vint16m2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                          vint16m4_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst,
+                                          vint16m8_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                           vint32mf2_t vector,
+                                           vint64m1_t scalar, size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                          vint32m1_t vector, vint64m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                          vint32m2_t vector, vint64m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                          vint32m4_t vector, vint64m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst,
+                                          vint32m8_t vector, vint64m1_t scalar,
+                                          size_t vl) {
+  return vwredsum(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                            vuint8mf8_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                            vuint8mf4_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                            vuint8mf2_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                           vuint8m1_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                           vuint8m2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                           vuint8m4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst,
+                                           vuint8m8_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                             vuint16mf4_t vector,
+                                             vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                             vuint16mf2_t vector,
+                                             vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                            vuint16m1_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                            vuint16m2_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                            vuint16m4_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst,
+                                            vuint16m8_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                             vuint32mf2_t vector,
+                                             vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                            vuint32m1_t vector,
+                                            vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                            vuint32m2_t vector,
+                                            vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                            vuint32m4_t vector,
+                                            vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR5]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dst,
+                                            vuint32m8_t vector,
+                                            vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
new file mode 100644
index 000000000000..13ab0a1588cd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmax.c
@@ -0,0 +1,291 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dst,
+                                           vfloat32mf2_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m1_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m4_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m8_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+                                             vfloat32mf2_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+                                            vfloat32m1_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+                                            vfloat32m2_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+                                            vfloat32m4_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+                                            vfloat32m8_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                            vfloat64m1_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                            vfloat64m2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                            vfloat64m4_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                            vfloat64m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
new file mode 100644
index 000000000000..6e4b0b114946
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredmin.c
@@ -0,0 +1,291 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dst,
+                                           vfloat32mf2_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m1_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m4_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m8_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+                                             vfloat32mf2_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+                                            vfloat32m1_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+                                            vfloat32m2_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+                                            vfloat32m4_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+                                            vfloat32m8_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                            vfloat64m1_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                            vfloat64m2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                            vfloat64m4_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                            vfloat64m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+}

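As a usage note (not part of the patch): the autogenerated tests above only verify IR lowering, but the same intrinsics are meant to be called directly from C. Below is a minimal sketch that folds a buffer with the unmasked vfredmin_vs_f32m1_f32m1 form exercised above. It assumes the vsetvl_e32m1, vle32_v_f32m1 and vse32_v_f32m1 helpers are available from <riscv_vector.h>; those are not added by this change, and the sketch is illustrative only.

#include <riscv_vector.h>
#include <stddef.h>

// Minimal sketch (not part of this patch): minimum of a non-empty float
// buffer using the unmasked vfredmin_vs_f32m1_f32m1 intrinsic tested above.
// vsetvl_e32m1/vle32_v_f32m1/vse32_v_f32m1 are assumed to exist in
// <riscv_vector.h>; only the vfredmin call is covered by this change.
static float buffer_min(const float *buf, size_t n) {
  size_t vl = vsetvl_e32m1(n);
  // Seed the accumulator with the first group so element 0 already holds a
  // candidate minimum; the other elements only serve as the dst/tail value.
  vfloat32m1_t acc = vle32_v_f32m1(buf, vl);
  for (size_t i = 0; i < n; i += vl) {
    vl = vsetvl_e32m1(n - i);
    vfloat32m1_t v = vle32_v_f32m1(buf + i, vl);
    // Argument order matches the tests: (dst, vector, scalar, vl).
    // acc[0] becomes min(acc[0], v[0], ..., v[vl-1]).
    acc = vfredmin_vs_f32m1_f32m1(acc, v, acc, vl);
  }
  float out;
  vse32_v_f32m1(&out, acc, 1);  // read back element 0 of the m1 result
  return out;
}

The masked _m variants used in the *_m tests take a vboolN_t mask as the first argument and reduce only the active elements, as shown in the tests that follow.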
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
new file mode 100644
index 000000000000..190a6e31c8d0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
@@ -0,0 +1,579 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+                                           vfloat32mf2_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredsum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredsum_vs_f32m1_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredsum_vs_f32m2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredsum_vs_f32m4_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+                                          vfloat32m1_t scalar, size_t vl) {
+  return vfredsum_vs_f32m8_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredsum_vs_f64m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredsum_vs_f64m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredsum_vs_f64m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+                                          vfloat64m1_t scalar, size_t vl) {
+  return vfredsum_vs_f64m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+                                             vfloat32mf2_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredsum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+                                            vfloat32m1_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredsum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+                                            vfloat32m2_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredsum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+                                            vfloat32m4_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredsum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+                                            vfloat32m8_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredsum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                            vfloat64m1_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredsum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                            vfloat64m2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredsum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                            vfloat64m4_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredsum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                            vfloat64m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfredsum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+                                            vfloat32mf2_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dst,
+                                           vfloat32m1_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m1_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dst,
+                                           vfloat32m2_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m2_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dst,
+                                           vfloat32m4_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m4_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dst,
+                                           vfloat32m8_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m8_f32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dst,
+                                           vfloat64m1_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dst,
+                                           vfloat64m2_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dst,
+                                           vfloat64m4_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dst,
+                                           vfloat64m8_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+                                              vfloat32mf2_t vector,
+                                              vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+                                             vfloat32m1_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+                                             vfloat32m2_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+                                             vfloat32m4_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i32(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+                                             vfloat32m8_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                             vfloat64m1_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                             vfloat64m2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                             vfloat64m4_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                             vfloat64m8_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
new file mode 100644
index 000000000000..c010bcf608a8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredosum.c
@@ -0,0 +1,171 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+                                             vfloat32mf2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dst,
+                                            vfloat32m1_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dst,
+                                            vfloat32m2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dst,
+                                            vfloat32m4_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dst,
+                                            vfloat32m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                               vfloat32mf2_t vector,
+                                               vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                              vfloat32m1_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                              vfloat32m2_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                              vfloat32m4_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+                                              vfloat32m8_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
new file mode 100644
index 000000000000..023ecd72ccf4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
@@ -0,0 +1,171 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+                                            vfloat32mf2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
+                                           vfloat32m1_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f32m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
+                                           vfloat32m2_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f32m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
+                                           vfloat32m4_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f32m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
+                                           vfloat32m8_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f32m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                              vfloat32mf2_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                             vfloat32m1_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                             vfloat32m2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                             vfloat32m4_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i32(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+                                             vfloat32m8_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredsum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
new file mode 100644
index 000000000000..268f2e4cd9f0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredand.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                       vint8mf8_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredand_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                       vint8mf4_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredand_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                       vint8mf2_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredand_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                      vint8m1_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredand_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                      vint8m2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredand_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                      vint8m4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredand_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                      vint8m8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredand_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint16mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredand_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint16mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredand_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                         vint16m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint16m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint16m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint16m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                          vint32mf2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vredand_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                         vint32m1_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                         vint32m2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                         vint32m4_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                         vint32m8_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                         vint64m1_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                         vint64m2_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                         vint64m4_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                         vint64m8_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredand_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                        vuint8mf8_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredand_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                        vuint8mf4_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredand_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                        vuint8mf2_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredand_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                       vuint8m1_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredand_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                       vuint8m2_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredand_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                       vuint8m4_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredand_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                       vuint8m8_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredand_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                           vuint16mf4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                           vuint16mf2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                          vuint16m1_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                          vuint16m2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                          vuint16m4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                          vuint16m8_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                           vuint32mf2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                          vuint32m1_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                          vuint32m2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                          vuint32m4_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                          vuint32m8_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                          vuint64m1_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                          vuint64m2_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                          vuint64m4_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredand_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                          vuint64m8_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
new file mode 100644
index 000000000000..490121430daa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmax.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                       vint8mf8_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmax_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                       vint8mf4_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmax_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                       vint8mf2_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmax_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                      vint8m1_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmax_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                      vint8m2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmax_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                      vint8m4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmax_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                      vint8m8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmax_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint16mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredmax_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint16mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredmax_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                         vint16m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint16m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint16m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint16m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                          vint32mf2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vredmax_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                         vint32m1_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                         vint32m2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                         vint32m4_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                         vint32m8_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                         vint64m1_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                         vint64m2_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                         vint64m4_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                         vint64m8_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmax_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                         vuint8mf8_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredmaxu_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                         vuint8mf4_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredmaxu_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                         vuint8mf2_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredmaxu_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                        vuint8m1_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredmaxu_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                        vuint8m2_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredmaxu_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                        vuint8m4_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredmaxu_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                        vuint8m8_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredmaxu_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                            vuint16mf4_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                            vuint16mf2_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                           vuint16m1_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                           vuint16m2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                           vuint16m4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                           vuint16m8_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                            vuint32mf2_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                           vuint32m1_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                           vuint32m2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                           vuint32m4_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                           vuint32m8_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                           vuint64m1_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                           vuint64m2_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                           vuint64m4_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                           vuint64m8_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
new file mode 100644
index 000000000000..e9e8cac3a5a4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredmin.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                       vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                         vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                       vint8mf8_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmin_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                       vint8mf4_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmin_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                       vint8mf2_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredmin_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                      vint8m1_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmin_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                      vint8m2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmin_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                      vint8m4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmin_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                      vint8m8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredmin_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint16mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredmin_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint16mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredmin_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                         vint16m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint16m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint16m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint16m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                          vint32mf2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vredmin_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                         vint32m1_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                         vint32m2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                         vint32m4_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                         vint32m8_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                         vint64m1_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                         vint64m2_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                         vint64m4_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                         vint64m8_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredmin_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                         vuint8mf8_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredminu_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                         vuint8mf4_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredminu_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                         vuint8mf2_t vector, vuint8m1_t scalar,
+                                         size_t vl) {
+  return vredminu_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                        vuint8m1_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredminu_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                        vuint8m2_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredminu_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                        vuint8m4_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredminu_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                        vuint8m8_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredminu_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                            vuint16mf4_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                            vuint16mf2_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                           vuint16m1_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                           vuint16m2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                           vuint16m4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                           vuint16m8_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                            vuint32mf2_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                           vuint32m1_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                           vuint32m2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                           vuint32m4_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                           vuint32m8_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                           vuint64m1_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                           vuint64m2_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                           vuint64m4_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                           vuint64m8_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
new file mode 100644
index 000000000000..c933e4a0f73a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredor.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                   vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                   vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                   vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                   vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                      vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                      vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                      vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                      vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                      vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                      vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                      vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                      vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                      vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                      vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                      vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                      vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                    vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                    vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                    vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                    vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                       vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                       vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                       vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                       vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                       vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                       vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                       vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                       vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                       vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                       vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                       vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                       vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                      vint8mf8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                      vint8mf4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                      vint8mf2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                     vint8m1_t vector, vint8m1_t scalar,
+                                     size_t vl) {
+  return vredor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                     vint8m2_t vector, vint8m1_t scalar,
+                                     size_t vl) {
+  return vredor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                     vint8m4_t vector, vint8m1_t scalar,
+                                     size_t vl) {
+  return vredor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                     vint8m8_t vector, vint8m1_t scalar,
+                                     size_t vl) {
+  return vredor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                         vint16mf4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                         vint16mf2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                        vint16m1_t vector, vint16m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                        vint16m2_t vector, vint16m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                        vint16m4_t vector, vint16m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                        vint16m8_t vector, vint16m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                         vint32mf2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                        vint32m1_t vector, vint32m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                        vint32m2_t vector, vint32m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                        vint32m4_t vector, vint32m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                        vint32m8_t vector, vint32m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                        vint64m1_t vector, vint64m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                        vint64m2_t vector, vint64m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                        vint64m4_t vector, vint64m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                        vint64m8_t vector, vint64m1_t scalar,
+                                        size_t vl) {
+  return vredor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                       vuint8mf8_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                       vuint8mf4_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                       vuint8mf2_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                      vuint8m1_t vector, vuint8m1_t scalar,
+                                      size_t vl) {
+  return vredor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                      vuint8m2_t vector, vuint8m1_t scalar,
+                                      size_t vl) {
+  return vredor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                      vuint8m4_t vector, vuint8m1_t scalar,
+                                      size_t vl) {
+  return vredor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                      vuint8m8_t vector, vuint8m1_t scalar,
+                                      size_t vl) {
+  return vredor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                          vuint16mf4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                          vuint16mf2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                         vuint16m1_t vector, vuint16m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                         vuint16m2_t vector, vuint16m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                         vuint16m4_t vector, vuint16m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                         vuint16m8_t vector, vuint16m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                          vuint32mf2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                         vuint32m1_t vector, vuint32m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                         vuint32m2_t vector, vuint32m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                         vuint32m4_t vector, vuint32m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                         vuint32m8_t vector, vuint32m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                         vuint64m1_t vector, vuint64m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                         vuint64m2_t vector, vuint64m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                         vuint64m4_t vector, vuint64m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredor_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                         vuint64m8_t vector, vuint64m1_t scalar,
+                                         size_t vl) {
+  return vredor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
new file mode 100644
index 000000000000..e574243d0f2d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredsum.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                       vint8mf8_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                       vint8mf4_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                       vint8mf2_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                      vint8m1_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredsum_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                      vint8m2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredsum_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                      vint8m4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredsum_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                      vint8m8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredsum_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint16mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredsum_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint16mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredsum_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                         vint16m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint16m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint16m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint16m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                          vint32mf2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vredsum_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                         vint32m1_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                         vint32m2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                         vint32m4_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                         vint32m8_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                         vint64m1_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                         vint64m2_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                         vint64m4_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                         vint64m8_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredsum_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                        vuint8mf8_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredsum_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                        vuint8mf4_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredsum_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                        vuint8mf2_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredsum_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                       vuint8m1_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                       vuint8m2_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                       vuint8m4_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                       vuint8m8_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredsum_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                           vuint16mf4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                           vuint16mf2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                          vuint16m1_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                          vuint16m2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                          vuint16m4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                          vuint16m8_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                           vuint32mf2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                          vuint32m1_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                          vuint32m2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                          vuint32m4_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                          vuint32m8_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                          vuint64m1_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                          vuint64m2_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                          vuint64m4_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                          vuint64m8_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
new file mode 100644
index 000000000000..04de6d0e1e6b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vredxor.c
@@ -0,0 +1,1372 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dst, vint8mf8_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8mf8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dst, vint8mf4_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8mf4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dst, vint8mf2_t vector,
+                                     vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8mf2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dst, vint8m1_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m1_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dst, vint8m2_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m2_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dst, vint8m4_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m4_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dst, vint8m8_t vector,
+                                    vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m8_i8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dst, vint16mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dst, vint16mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dst, vint16m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dst, vint16m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dst, vint16m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dst, vint16m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dst, vint32mf2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dst, vint32m1_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dst, vint32m2_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dst, vint32m4_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dst, vint32m8_t vector,
+                                       vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dst, vint64m1_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dst, vint64m2_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dst, vint64m4_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dst, vint64m8_t vector,
+                                       vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m8_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dst, vuint8mf8_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8mf8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dst, vuint8mf4_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8mf4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dst, vuint8mf2_t vector,
+                                      vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8mf2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dst, vuint8m1_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m1_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dst, vuint8m2_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m2_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dst, vuint8m4_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m4_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dst, vuint8m8_t vector,
+                                     vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m8_u8m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dst, vuint16mf4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dst, vuint16mf2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dst, vuint16m1_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dst, vuint16m2_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dst, vuint16m4_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dst, vuint16m8_t vector,
+                                        vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dst, vuint32mf2_t vector,
+                                         vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dst, vuint32m1_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dst, vuint32m2_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dst, vuint32m4_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dst, vuint32m8_t vector,
+                                        vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dst, vuint64m1_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dst, vuint64m2_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dst, vuint64m4_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dst, vuint64m8_t vector,
+                                        vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m8_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dst,
+                                       vint8mf8_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor_vs_i8mf8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dst,
+                                       vint8mf4_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor_vs_i8mf4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dst,
+                                       vint8mf2_t vector, vint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor_vs_i8mf2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dst,
+                                      vint8m1_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredxor_vs_i8m1_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dst,
+                                      vint8m2_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredxor_vs_i8m2_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dst,
+                                      vint8m4_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredxor_vs_i8m4_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dst,
+                                      vint8m8_t vector, vint8m1_t scalar,
+                                      size_t vl) {
+  return vredxor_vs_i8m8_i8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint16mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredxor_vs_i16mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint16mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vredxor_vs_i16mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                         vint16m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i16m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint16m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i16m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint16m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i16m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint16m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i16m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                          vint32mf2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vredxor_vs_i32mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                         vint32m1_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i32m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                         vint32m2_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i32m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                         vint32m4_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i32m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                         vint32m8_t vector, vint32m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i32m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                         vint64m1_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i64m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                         vint64m2_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i64m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                         vint64m4_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i64m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                         vint64m8_t vector, vint64m1_t scalar,
+                                         size_t vl) {
+  return vredxor_vs_i64m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dst,
+                                        vuint8mf8_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredxor_vs_u8mf8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dst,
+                                        vuint8mf4_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredxor_vs_u8mf4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dst,
+                                        vuint8mf2_t vector, vuint8m1_t scalar,
+                                        size_t vl) {
+  return vredxor_vs_u8mf2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dst,
+                                       vuint8m1_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor_vs_u8m1_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dst,
+                                       vuint8m2_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor_vs_u8m2_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dst,
+                                       vuint8m4_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor_vs_u8m4_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i32(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dst,
+                                       vuint8m8_t vector, vuint8m1_t scalar,
+                                       size_t vl) {
+  return vredxor_vs_u8m8_u8m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                           vuint16mf4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                           vuint16mf2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                          vuint16m1_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                          vuint16m2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                          vuint16m4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                          vuint16m8_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                           vuint32mf2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                          vuint32m1_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                          vuint32m2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                          vuint32m4_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                          vuint32m8_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                          vuint64m1_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                          vuint64m2_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                          vuint64m4_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                          vuint64m8_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m8_u64m1_m(mask, dst, vector, scalar, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
new file mode 100644
index 000000000000..3ccba3e5949e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwredsum.c
@@ -0,0 +1,1124 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dst, vint8mf8_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8mf8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dst, vint8mf4_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8mf4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dst, vint8mf2_t vector,
+                                        vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8mf2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dst, vint8m1_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m1_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dst, vint8m2_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m2_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dst, vint8m4_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m4_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dst, vint8m8_t vector,
+                                       vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m8_i16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dst, vint16mf4_t vector,
+                                         vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16mf4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dst, vint16mf2_t vector,
+                                         vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16mf2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dst, vint16m1_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m1_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dst, vint16m2_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m2_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dst, vint16m4_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m4_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dst, vint16m8_t vector,
+                                        vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m8_i32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dst, vint32mf2_t vector,
+                                         vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32mf2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dst, vint32m1_t vector,
+                                        vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m1_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dst, vint32m2_t vector,
+                                        vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m2_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dst, vint32m4_t vector,
+                                        vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m4_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dst, vint32m8_t vector,
+                                        vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m8_i64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dst, vuint8mf8_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dst, vuint8mf4_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dst, vuint8mf2_t vector,
+                                          vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dst, vuint8m1_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m1_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dst, vuint8m2_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m2_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dst, vuint8m4_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m4_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dst, vuint8m8_t vector,
+                                         vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m8_u16m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dst, vuint16mf4_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16mf4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dst, vuint16mf2_t vector,
+                                           vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16mf2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dst, vuint16m1_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m1_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dst, vuint16m2_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m2_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dst, vuint16m4_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m4_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dst, vuint16m8_t vector,
+                                          vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m8_u32m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dst, vuint32mf2_t vector,
+                                           vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32mf2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dst, vuint32m1_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m1_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dst, vuint32m2_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m2_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dst, vuint32m4_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m4_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dst, vuint32m8_t vector,
+                                          vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m8_u64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dst,
+                                          vint8mf8_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i8mf8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dst,
+                                          vint8mf4_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i8mf4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dst,
+                                          vint8mf2_t vector, vint16m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i8mf2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dst,
+                                         vint8m1_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vwredsum_vs_i8m1_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dst,
+                                         vint8m2_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vwredsum_vs_i8m2_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dst,
+                                         vint8m4_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vwredsum_vs_i8m4_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dst,
+                                         vint8m8_t vector, vint16m1_t scalar,
+                                         size_t vl) {
+  return vwredsum_vs_i8m8_i16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dst,
+                                           vint16mf4_t vector,
+                                           vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16mf4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dst,
+                                           vint16mf2_t vector,
+                                           vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16mf2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dst,
+                                          vint16m1_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i16m1_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dst,
+                                          vint16m2_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i16m2_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dst,
+                                          vint16m4_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i16m4_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dst,
+                                          vint16m8_t vector, vint32m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i16m8_i32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dst,
+                                           vint32mf2_t vector,
+                                           vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32mf2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dst,
+                                          vint32m1_t vector, vint64m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i32m1_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dst,
+                                          vint32m2_t vector, vint64m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i32m2_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dst,
+                                          vint32m4_t vector, vint64m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i32m4_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dst,
+                                          vint32m8_t vector, vint64m1_t scalar,
+                                          size_t vl) {
+  return vwredsum_vs_i32m8_i64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dst,
+                                            vuint8mf8_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dst,
+                                            vuint8mf4_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dst,
+                                            vuint8mf2_t vector,
+                                            vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dst,
+                                           vuint8m1_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m1_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dst,
+                                           vuint8m2_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m2_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dst,
+                                           vuint8m4_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m4_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i32(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dst,
+                                           vuint8m8_t vector,
+                                           vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m8_u16m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dst,
+                                             vuint16mf4_t vector,
+                                             vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16mf4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dst,
+                                             vuint16mf2_t vector,
+                                             vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16mf2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dst,
+                                            vuint16m1_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m1_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dst,
+                                            vuint16m2_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m2_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dst,
+                                            vuint16m4_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m4_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i32(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dst,
+                                            vuint16m8_t vector,
+                                            vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m8_u32m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dst,
+                                             vuint32mf2_t vector,
+                                             vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32mf2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dst,
+                                            vuint32m1_t vector,
+                                            vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m1_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dst,
+                                            vuint32m2_t vector,
+                                            vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m2_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dst,
+                                            vuint32m4_t vector,
+                                            vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m4_u64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i32(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dst,
+                                            vuint32m8_t vector,
+                                            vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m8_u64m1_m(mask, dst, vector, scalar, vl);
+}