[clang] 670c72f - [RISCV] Restore tests for vf(w)redusum.

Craig Topper via cfe-commits <cfe-commits at lists.llvm.org>
Mon Nov 1 14:39:36 PDT 2021


Author: Craig Topper
Date: 2021-11-01T14:35:22-07:00
New Revision: 670c72f6f70434500d1475e1524a7088814fbc73

URL: https://github.com/llvm/llvm-project/commit/670c72f6f70434500d1475e1524a7088814fbc73
DIFF: https://github.com/llvm/llvm-project/commit/670c72f6f70434500d1475e1524a7088814fbc73.diff

LOG: [RISCV] Restore tests for vf(w)redusum.

When D105690 changed the mnemonic from vf(w)redsum to vf(w)redusum,
several tests were deleted instead of being renamed.

It also renames the tests that weren't deleted so that all of them
consistently use the new mnemonic.
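
As an illustrative sketch (not part of the commit), the rename is visible to
users of <riscv_vector.h> only as a new intrinsic spelling. The function below
assumes the unprefixed intrinsic names used by the restored tests, with the
pre-D105690 spelling kept in a comment:

    #include <riscv_vector.h>

    // Unordered single-width float reduction; only the mnemonic changed.
    vfloat32m1_t sum_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
                           vfloat32m1_t scalar, size_t vl) {
      // Old (pre-D105690) spelling: vfredsum_vs_f32m1_f32m1(...)
      return vfredusum_vs_f32m1_f32m1(dst, vector, scalar, vl);
    }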

Added: 
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredusum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredusum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredusum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredusum.c
    llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll

Modified: 
    

Removed: 
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c


################################################################################
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredusum.c
similarity index 100%
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredusum.c

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredusum.c
similarity index 100%
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredusum.c

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredusum.c
similarity index 100%
rename from clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics/vfredusum.c

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredusum.c
new file mode 100644
index 0000000000000..74a71208fddb9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredusum.c
@@ -0,0 +1,225 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
+// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+                                            vfloat32mf2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dst,
+                                           vfloat32m1_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m1_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dst,
+                                           vfloat32m2_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m2_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dst,
+                                           vfloat32m4_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m4_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dst,
+                                           vfloat32m8_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m8_f64m1(dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                              vfloat32mf2_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                             vfloat32m1_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                             vfloat32m2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                             vfloat32m4_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+                                             vfloat32m8_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1 (vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16mf4_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1 (vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16mf2_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1 (vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m1_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1 (vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m2_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1 (vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m4_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1 (vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m8_f32m1(dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl);
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll
new file mode 100644
index 0000000000000..ad4e946a01472
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv32.ll
@@ -0,0 +1,692 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
+  <vscale x 4 x half>,
+  <vscale x 1 x half>,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
+    <vscale x 4 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1(
+  <vscale x 4 x half>,
+  <vscale x 1 x half>,
+  <vscale x 4 x half>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1(
+    <vscale x 4 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16(
+  <vscale x 4 x half>,
+  <vscale x 2 x half>,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16(
+    <vscale x 4 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1(
+  <vscale x 4 x half>,
+  <vscale x 2 x half>,
+  <vscale x 4 x half>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1(
+    <vscale x 4 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16(
+  <vscale x 4 x half>,
+  <vscale x 8 x half>,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16(
+    <vscale x 4 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1(
+  <vscale x 4 x half>,
+  <vscale x 8 x half>,
+  <vscale x 4 x half>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1(
+    <vscale x 4 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16(
+  <vscale x 4 x half>,
+  <vscale x 16 x half>,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16(
+    <vscale x 4 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1(
+  <vscale x 4 x half>,
+  <vscale x 16 x half>,
+  <vscale x 4 x half>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1(
+    <vscale x 4 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16(
+  <vscale x 4 x half>,
+  <vscale x 32 x half>,
+  <vscale x 4 x half>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16(
+    <vscale x 4 x half> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 4 x half> %2,
+    i32 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1(
+  <vscale x 4 x half>,
+  <vscale x 32 x half>,
+  <vscale x 4 x half>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1(
+    <vscale x 4 x half> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32(
+  <vscale x 2 x float>,
+  <vscale x 1 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32(
+    <vscale x 2 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1(
+  <vscale x 2 x float>,
+  <vscale x 1 x float>,
+  <vscale x 2 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1(
+    <vscale x 2 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32(
+  <vscale x 2 x float>,
+  <vscale x 4 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32(
+    <vscale x 2 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1(
+  <vscale x 2 x float>,
+  <vscale x 4 x float>,
+  <vscale x 2 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1(
+    <vscale x 2 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32(
+  <vscale x 2 x float>,
+  <vscale x 8 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32(
+    <vscale x 2 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1(
+  <vscale x 2 x float>,
+  <vscale x 8 x float>,
+  <vscale x 2 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1(
+    <vscale x 2 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32(
+  <vscale x 2 x float>,
+  <vscale x 16 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32(
+    <vscale x 2 x float> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1(
+  <vscale x 2 x float>,
+  <vscale x 16 x float>,
+  <vscale x 2 x float>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1(
+    <vscale x 2 x float> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll
new file mode 100644
index 0000000000000..2f0f81533edb6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredusum-rv64.ll
@@ -0,0 +1,692 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
+  <vscale x 4 x half>,
+  <vscale x 1 x half>,
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
+    <vscale x 4 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 4 x half> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16(
+  <vscale x 4 x half>,
+  <vscale x 1 x half>,
+  <vscale x 4 x half>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16(
+    <vscale x 4 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16(
+  <vscale x 4 x half>,
+  <vscale x 2 x half>,
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16(
+    <vscale x 4 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 4 x half> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16(
+  <vscale x 4 x half>,
+  <vscale x 2 x half>,
+  <vscale x 4 x half>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16(
+    <vscale x 4 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16(
+  <vscale x 4 x half>,
+  <vscale x 8 x half>,
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16(
+    <vscale x 4 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 4 x half> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16(
+  <vscale x 4 x half>,
+  <vscale x 8 x half>,
+  <vscale x 4 x half>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16(
+    <vscale x 4 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16(
+  <vscale x 4 x half>,
+  <vscale x 16 x half>,
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16(
+    <vscale x 4 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 4 x half> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16(
+  <vscale x 4 x half>,
+  <vscale x 16 x half>,
+  <vscale x 4 x half>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16(
+    <vscale x 4 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16(
+  <vscale x 4 x half>,
+  <vscale x 32 x half>,
+  <vscale x 4 x half>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16(
+    <vscale x 4 x half> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 4 x half> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16(
+  <vscale x 4 x half>,
+  <vscale x 32 x half>,
+  <vscale x 4 x half>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16(
+    <vscale x 4 x half> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 4 x half> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32(
+  <vscale x 2 x float>,
+  <vscale x 1 x float>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32(
+    <vscale x 2 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32(
+  <vscale x 2 x float>,
+  <vscale x 1 x float>,
+  <vscale x 2 x float>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32(
+    <vscale x 2 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32(
+  <vscale x 2 x float>,
+  <vscale x 4 x float>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32(
+    <vscale x 2 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32(
+  <vscale x 2 x float>,
+  <vscale x 4 x float>,
+  <vscale x 2 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32(
+    <vscale x 2 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32(
+  <vscale x 2 x float>,
+  <vscale x 8 x float>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32(
+    <vscale x 2 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32(
+  <vscale x 2 x float>,
+  <vscale x 8 x float>,
+  <vscale x 2 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32(
+    <vscale x 2 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32(
+  <vscale x 2 x float>,
+  <vscale x 16 x float>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32(
+    <vscale x 2 x float> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32(
+  <vscale x 2 x float>,
+  <vscale x 16 x float>,
+  <vscale x 2 x float>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32(
+    <vscale x 2 x float> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64(
+  <vscale x 1 x double>,
+  <vscale x 2 x double>,
+  <vscale x 1 x double>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64(
+  <vscale x 1 x double>,
+  <vscale x 4 x double>,
+  <vscale x 1 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64(
+  <vscale x 1 x double>,
+  <vscale x 8 x double>,
+  <vscale x 1 x double>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll
new file mode 100644
index 0000000000000..25064a75cc795
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll
@@ -0,0 +1,508 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
+  <vscale x 2 x float>,
+  <vscale x 1 x half>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
+    <vscale x 2 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 1 x half>,
+  <vscale x 2 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16(
+  <vscale x 2 x float>,
+  <vscale x 2 x half>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x half>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16(
+  <vscale x 2 x float>,
+  <vscale x 4 x half>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16(
+    <vscale x 2 x float> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 4 x half>,
+  <vscale x 2 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16(
+  <vscale x 2 x float>,
+  <vscale x 8 x half>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16(
+    <vscale x 2 x float> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 8 x half>,
+  <vscale x 2 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
+  <vscale x 2 x float>,
+  <vscale x 16 x half>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
+    <vscale x 2 x float> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 16 x half>,
+  <vscale x 2 x float>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
+  <vscale x 2 x float>,
+  <vscale x 32 x half>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
+    <vscale x 2 x float> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 2 x float> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
+  <vscale x 2 x float>,
+  <vscale x 32 x half>,
+  <vscale x 2 x float>,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
+    <vscale x 2 x float> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
+  <vscale x 1 x double>,
+  <vscale x 2 x float>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 2 x float>,
+  <vscale x 1 x double>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
+  <vscale x 1 x double>,
+  <vscale x 4 x float>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 4 x float>,
+  <vscale x 1 x double>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
+  <vscale x 1 x double>,
+  <vscale x 8 x float>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 8 x float>,
+  <vscale x 1 x double>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
+  <vscale x 1 x double>,
+  <vscale x 16 x float>,
+  <vscale x 1 x double>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
+    <vscale x 1 x double> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 1 x double> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 16 x float>,
+  <vscale x 1 x double>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll
new file mode 100644
index 0000000000000..d8fabd4906b28
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll
@@ -0,0 +1,508 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   < %s | FileCheck %s
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
+  <vscale x 2 x float>,
+  <vscale x 1 x half>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv1f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16(
+    <vscale x 2 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 1 x half>,
+  <vscale x 2 x float>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16(
+  <vscale x 2 x float>,
+  <vscale x 2 x half>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv2f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x half>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16(
+  <vscale x 2 x float>,
+  <vscale x 4 x half>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv4f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16(
+    <vscale x 2 x float> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 4 x half>,
+  <vscale x 2 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16(
+  <vscale x 2 x float>,
+  <vscale x 8 x half>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv8f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16(
+    <vscale x 2 x float> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 8 x half>,
+  <vscale x 2 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
+  <vscale x 2 x float>,
+  <vscale x 16 x half>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv16f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16(
+    <vscale x 2 x float> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 16 x half>,
+  <vscale x 2 x float>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
+  <vscale x 2 x float>,
+  <vscale x 32 x half>,
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv2f32_nxv32f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16(
+    <vscale x 2 x float> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 2 x float> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
+  <vscale x 2 x float>,
+  <vscale x 32 x half>,
+  <vscale x 2 x float>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16(
+    <vscale x 2 x float> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv1f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
+  <vscale x 1 x double>,
+  <vscale x 2 x float>,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv2f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 2 x float>,
+  <vscale x 1 x double>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
+  <vscale x 1 x double>,
+  <vscale x 4 x float>,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv4f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 4 x float>,
+  <vscale x 1 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v10, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
+  <vscale x 1 x double>,
+  <vscale x 8 x float>,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv8f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 8 x float>,
+  <vscale x 1 x double>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v12, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
+  <vscale x 1 x double>,
+  <vscale x 16 x float>,
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_vs_nxv1f64_nxv16f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32(
+    <vscale x 1 x double> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 1 x double> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32(
+  <vscale x 1 x double>,
+  <vscale x 16 x float>,
+  <vscale x 1 x double>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwredusum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    vfwredusum.vs v8, v16, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32(
+    <vscale x 1 x double> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 1 x double> %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
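
A note on maintaining these files: as the NOTE lines state, every "; CHECK" block above was produced by utils/update_llc_test_checks.py rather than written by hand, so the expected way to refresh the assertions after a codegen change is to rerun that script against a built llc. A minimal sketch, assuming llc was built into build/bin (the paths and the --llc-binary flag shown here are illustrative invocation details, not part of the commit):

    # Re-executes each embedded RUN line and rewrites the CHECK
    # assertions in place, which is how the output above was generated.
    python3 llvm/utils/update_llc_test_checks.py \
        --llc-binary build/bin/llc \
        llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv32.ll \
        llvm/test/CodeGen/RISCV/rvv/vfwredusum-rv64.ll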
