[clang] 0608bbd - [RISCV] Rename assembler mnemonics of unordered floating-point reductions for v1.0-rc change

Ben Shi via cfe-commits cfe-commits at lists.llvm.org
Mon Oct 11 23:57:21 PDT 2021


Author: jacquesguan
Date: 2021-10-12T06:46:46Z
New Revision: 0608bbd4e8d5227ee73470840ae8988f01b4604d

URL: https://github.com/llvm/llvm-project/commit/0608bbd4e8d5227ee73470840ae8988f01b4604d
DIFF: https://github.com/llvm/llvm-project/commit/0608bbd4e8d5227ee73470840ae8988f01b4604d.diff

LOG: [RISCV] Rename assembler mnemonics of unordered floating-point reductions for v1.0-rc change

Rename vfredsum and vfwredsum to vfredusum and vfwredusum to match the v1.0-rc spec. Keep vfredsum and vfwredsum as assembler aliases for the renamed mnemonics.

Reviewed By: luismarques, HsiangKai, khchen, frasercrmck, kito-cheng, craig.topper

Differential Revision: https://reviews.llvm.org/D105690
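
For illustration, a minimal sketch of how the rename surfaces at the assembly level (the register operands and any surrounding vsetvli setup are assumed for the example, not taken from this commit):

    # Canonical v1.0-rc spellings after this change:
    vfredusum.vs  v8, v16, v8    # unordered single-width FP sum reduction
    vfwredusum.vs v8, v16, v8    # unordered widening FP sum reduction
    # The pre-v1.0 spellings remain accepted as assembler aliases:
    vfredsum.vs   v8, v16, v8    # alias for vfredusum.vs
    vfwredsum.vs  v8, v16, v8    # alias for vfwredusum.vs

The C intrinsics and LLVM IR intrinsics are renamed the same way (vfredsum -> vfredusum, vfwredsum -> vfwredusum), as the updated tests below show.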

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
    llvm/test/MC/RISCV/rvv/aliases.s
    llvm/test/MC/RISCV/rvv/freduction.s

Removed: 
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
    llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll


################################################################################
diff  --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 5ef587429896b..6de76aae4cdf8 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2006,11 +2006,11 @@ let HasMaskedOffOperand = false in {
 // 15.3. Vector Single-Width Floating-Point Reduction Instructions
 defm vfredmax : RVVFloatingReductionBuiltin;
 defm vfredmin : RVVFloatingReductionBuiltin;
-defm vfredsum : RVVFloatingReductionBuiltin;
+defm vfredusum : RVVFloatingReductionBuiltin;
 defm vfredosum : RVVFloatingReductionBuiltin;
 
 // 15.4. Vector Widening Floating-Point Reduction Instructions
-defm vfwredsum : RVVFloatingWidenReductionBuiltin;
+defm vfwredusum : RVVFloatingWidenReductionBuiltin;
 defm vfwredosum : RVVFloatingWidenReductionBuiltin;
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
index 4555884d97511..cda20b2d98d12 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfredsum.c
@@ -5,194 +5,212 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
-                                           vfloat32mf2_t vector,
-                                           vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+                                            vfloat32mf2_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
-                                          vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
-                                          vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
-                                          vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
-                                          vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
-                                          vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
-                                          vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
-                                          vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
-                                          vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
-                                             vfloat32mf2_t vector,
-                                             vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+                                              vfloat32mf2_t vector,
+                                              vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
-                                            vfloat32m1_t vector,
-                                            vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+                                             vfloat32m1_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
-                                            vfloat32m2_t vector,
-                                            vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+                                             vfloat32m2_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
-                                            vfloat32m4_t vector,
-                                            vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+                                             vfloat32m4_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
-                                            vfloat32m8_t vector,
-                                            vfloat32m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+                                             vfloat32m8_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
-                                            vfloat64m1_t vector,
-                                            vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                             vfloat64m1_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
-                                            vfloat64m2_t vector,
-                                            vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                             vfloat64m2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
-                                            vfloat64m4_t vector,
-                                            vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                             vfloat64m4_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
-                                            vfloat64m8_t vector,
-                                            vfloat64m1_t scalar, size_t vl) {
-  return vfredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                             vfloat64m8_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(mask, dst, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
index 14049e1ea5cf2..2ca8eb4b1b4c6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwredsum.c
@@ -5,114 +5,124 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
-                                            vfloat32mf2_t vector,
-                                            vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dst,
+                                             vfloat32mf2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
-                                           vfloat32m1_t vector,
-                                           vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dst,
+                                            vfloat32m1_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
-                                           vfloat32m2_t vector,
-                                           vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dst,
+                                            vfloat32m2_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
-                                           vfloat32m4_t vector,
-                                           vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dst,
+                                            vfloat32m4_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
-                                           vfloat32m8_t vector,
-                                           vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dst,
+                                            vfloat32m8_t vector,
+                                            vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
-                                              vfloat32mf2_t vector,
-                                              vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                               vfloat32mf2_t vector,
+                                               vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
-                                             vfloat32m1_t vector,
-                                             vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                              vfloat32m1_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
-                                             vfloat32m2_t vector,
-                                             vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                              vfloat32m2_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
-                                             vfloat32m4_t vector,
-                                             vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                              vfloat32m4_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
+//
+// CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
-                                             vfloat32m8_t vector,
-                                             vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
+                                              vfloat32m8_t vector,
+                                              vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, dst, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
index 6c1ad4e765930..b38bbc23b2524 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfredsum.c
@@ -6,194 +6,194 @@
 
 #include <riscv_vector.h>
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1(vfloat32m1_t dst,
-                                           vfloat32mf2_t vector,
-                                           vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dst,
+                                            vfloat32mf2_t vector,
+                                            vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32mf2_f32m1(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
-                                          vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m1_f32m1(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dst, vfloat32m1_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m1_f32m1(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
-                                          vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m2_f32m1(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dst, vfloat32m2_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m2_f32m1(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
-                                          vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m4_f32m1(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dst, vfloat32m4_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m4_f32m1(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
-                                          vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m8_f32m1(dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dst, vfloat32m8_t vector,
+                                           vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m8_f32m1(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
-                                          vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m1_f64m1(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dst, vfloat64m1_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m1_f64m1(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
-                                          vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m2_f64m1(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dst, vfloat64m2_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m2_f64m1(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
-                                          vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m4_f64m1(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dst, vfloat64m4_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m4_f64m1(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
-                                          vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m8_f64m1(dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dst, vfloat64m8_t vector,
+                                           vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m8_f64m1(dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32mf2_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
-                                             vfloat32mf2_t vector,
-                                             vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dst,
+                                              vfloat32mf2_t vector,
+                                              vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32mf2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m1_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
-                                            vfloat32m1_t vector,
-                                            vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
+                                             vfloat32m1_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m1_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m2_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
-                                            vfloat32m2_t vector,
-                                            vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dst,
+                                             vfloat32m2_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m2_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m4_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
-                                            vfloat32m4_t vector,
-                                            vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dst,
+                                             vfloat32m4_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m4_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f32m8_f32m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
-                                            vfloat32m8_t vector,
-                                            vfloat32m1_t scalar, size_t vl) {
-  return vfredsum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dst,
+                                             vfloat32m8_t vector,
+                                             vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m8_f32m1_m(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m1_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
-                                            vfloat64m1_t vector,
-                                            vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
+                                             vfloat64m1_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m1_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m2_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
-                                            vfloat64m2_t vector,
-                                            vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
+                                             vfloat64m2_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m2_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m4_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
-                                            vfloat64m4_t vector,
-                                            vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
+                                             vfloat64m4_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m4_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f64m8_f64m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
-                                            vfloat64m8_t vector,
-                                            vfloat64m1_t scalar, size_t vl) {
-  return vfredsum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
+                                             vfloat64m8_t vector,
+                                             vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
@@ -394,112 +394,112 @@ vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
   return vfredosum_vs_f64m8_f64m1_m(mask, dst, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf4_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16mf4_f16m1(dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf2_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16mf2_f16m1(dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m1_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16m1_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m1_f16m1(dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m2_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16m2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m2_f16m1(dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m4_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16m4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m4_f16m1(dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m8_f16m1(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16m8_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m8_f16m1(dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf4_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16mf2_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m1_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m2_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m4_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
-// CHECK-RV64-LABEL: @test_vfredsum_vs_f16m8_f16m1_m(
+// CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredsum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredsum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1(
@@ -507,7 +507,7 @@ vfloat16m1_t test_vfredsum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16mf4_f16m1(dest, vector, scalar, vl);
 }
 
@@ -516,7 +516,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1 (vfloat16m1_t dest, vfloat16mf4_t ve
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16mf2_f16m1(dest, vector, scalar, vl);
 }
 
@@ -525,7 +525,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1 (vfloat16m1_t dest, vfloat16mf2_t ve
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16m1_f16m1(dest, vector, scalar, vl);
 }
 
@@ -534,7 +534,7 @@ vfloat16m1_t test_vfredosum_vs_f16m1_f16m1 (vfloat16m1_t dest, vfloat16m1_t vect
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16m2_f16m1(dest, vector, scalar, vl);
 }
 
@@ -543,7 +543,7 @@ vfloat16m1_t test_vfredosum_vs_f16m2_f16m1 (vfloat16m1_t dest, vfloat16m2_t vect
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16m4_f16m1(dest, vector, scalar, vl);
 }
 
@@ -552,7 +552,7 @@ vfloat16m1_t test_vfredosum_vs_f16m4_f16m1 (vfloat16m1_t dest, vfloat16m4_t vect
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16m8_f16m1(dest, vector, scalar, vl);
 }
 
@@ -561,7 +561,7 @@ vfloat16m1_t test_vfredosum_vs_f16m8_f16m1 (vfloat16m1_t dest, vfloat16m8_t vect
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
@@ -570,7 +570,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m (vbool64_t mask, vfloat16m1_t dest
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
@@ -579,7 +579,7 @@ vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m (vbool32_t mask, vfloat16m1_t dest
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
@@ -588,7 +588,7 @@ vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m (vbool16_t mask, vfloat16m1_t dest,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
@@ -597,7 +597,7 @@ vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m (vbool8_t mask, vfloat16m1_t dest,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
 }
 
@@ -606,6 +606,6 @@ vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m (vbool4_t mask, vfloat16m1_t dest,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m (vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
   return vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
 }
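
Note that the rename is source-visible: C code written against the old
vfredsum_* intrinsic names has to move to the vfredusum_* spellings (the
assembler alias added later in this patch only covers the mnemonic, not
the C intrinsics). A minimal sketch of the new spelling, assuming a
compiler with the same experimental V/Zfh features as the RUN lines
above; the wrapper name is illustrative, not from the patch:

  #include <riscv_vector.h>

  // Unordered FP reduction: element 0 of the result receives the sum of
  // the first vl elements of `vector` and element 0 of `scalar`; `dest`
  // supplies the tail elements. Mirrors the renamed tests above.
  vfloat16m1_t reduce_f16m1(vfloat16m1_t dest, vfloat16m1_t vector,
                            vfloat16m1_t scalar, size_t vl) {
    return vfredusum_vs_f16m1_f16m1(dest, vector, scalar, vl);
  }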

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
deleted file mode 100644
index aefde6483960d..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwredsum.c
+++ /dev/null
@@ -1,225 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d \
-// RUN:   -target-feature +experimental-v -target-feature +experimental-zfh \
-// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1(vfloat64m1_t dst,
-                                            vfloat32mf2_t vector,
-                                            vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t dst,
-                                           vfloat32m1_t vector,
-                                           vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m1_f64m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t dst,
-                                           vfloat32m2_t vector,
-                                           vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m2_f64m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t dst,
-                                           vfloat32m4_t vector,
-                                           vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m4_f64m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t dst,
-                                           vfloat32m8_t vector,
-                                           vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m8_f64m1(dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32mf2_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
-                                              vfloat32mf2_t vector,
-                                              vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32mf2_f64m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m1_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dst,
-                                             vfloat32m1_t vector,
-                                             vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m1_f64m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m2_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dst,
-                                             vfloat32m2_t vector,
-                                             vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m2_f64m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m4_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dst,
-                                             vfloat32m4_t vector,
-                                             vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m4_f64m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f32m8_f64m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dst,
-                                             vfloat32m8_t vector,
-                                             vfloat64m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f32m8_f64m1_m(mask, dst, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf4_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16mf4_f32m1 (vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16mf4_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf2_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16mf2_f32m1 (vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16mf2_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m1_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1 (vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16m1_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m2_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1 (vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16m2_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m4_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1 (vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16m4_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m8_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1 (vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16m8_f32m1(dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf4_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16mf4_f32m1_m (vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16mf2_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16mf2_f32m1_m (vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m1_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1_m (vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m2_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1_m (vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m4_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1_m (vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vfwredsum_vs_f16m8_f32m1_m(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1_m (vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredsum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl);
-}
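
For reference, the same coverage under the new name: the widening form
keeps the deleted file's signatures, only the vfwredsum_* spelling
changes to vfwredusum_*. A minimal sketch under that assumption (the
wrapper name is illustrative):

  #include <riscv_vector.h>

  // Widening unordered reduction: f32 elements of `vector` are widened
  // to f64 and summed together with element 0 of `scalar`; the f64 sum
  // lands in element 0 of the result.
  vfloat64m1_t wide_reduce(vfloat64m1_t dst, vfloat32mf2_t vector,
                           vfloat64m1_t scalar, size_t vl) {
    return vfwredusum_vs_f32mf2_f64m1(dst, vector, scalar, vl);
  }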

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index bc69f7566e9b3..3dad1581e1fb1 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1185,11 +1185,11 @@ let TargetPrefix = "riscv" in {
   defm vwredsum : RISCVReduction;
 
   defm vfredosum : RISCVReduction;
-  defm vfredsum : RISCVReduction;
+  defm vfredusum : RISCVReduction;
   defm vfredmin : RISCVReduction;
   defm vfredmax : RISCVReduction;
 
-  defm vfwredsum : RISCVReduction;
+  defm vfwredusum : RISCVReduction;
   defm vfwredosum : RISCVReduction;
 
   def int_riscv_vmand: RISCVBinaryAAANoMask;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index b3df212a3a9ef..cbc344411a479 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -1333,11 +1333,14 @@ let Predicates = [HasStdExtV, HasStdExtF] in {
 // Vector Single-Width Floating-Point Reduction Instructions
 let RVVConstraint = NoConstraint in {
 defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>;
-defm VFREDSUM : VRED_FV_V<"vfredsum", 0b000001>;
+defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>;
 defm VFREDMAX : VRED_FV_V<"vfredmax", 0b000111>;
 defm VFREDMIN : VRED_FV_V<"vfredmin", 0b000101>;
 } // RVVConstraint = NoConstraint
 
+def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm",
+                (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
+
 // Vector Widening Floating-Point Reduction Instructions
 let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
 // Set earlyclobber for following instructions for second and mask operands.
@@ -1345,10 +1348,14 @@ let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
 // will impose unnecessary restrictions by not allowing the destination to
 // overlap with the first (wide) operand.
 defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>;
-defm VFWREDSUM : VWRED_FV_V<"vfwredsum", 0b110001>;
+defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>;
 } // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
 } // Predicates = [HasStdExtV, HasStdExtF]
 
+def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
+                (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
+
+
 let Predicates = [HasStdExtV] in {
 // Vector Mask-Register Logical Instructions
 let RVVConstraint = NoConstraint in {
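
The trailing 0 in these InstAlias definitions marks them as parse-only:
the assembler keeps accepting the pre-v1.0 mnemonics, while the printer
and disassembler always emit the new ones. A sketch of the effect,
assuming a toolchain built with this patch (register choices are
arbitrary, and valid vector CSR state from a prior vsetvli is assumed):

  // Both statements assemble to the same vfredusum.vs encoding
  // (funct6 0b000001); a disassembler shows vfredusum.vs for both.
  void alias_demo(void) {
    __asm__ volatile("vfredusum.vs v8, v9, v10");
    __asm__ volatile("vfredsum.vs v8, v9, v10"); // accepted via the alias
  }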

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 359d5c04ef818..ffbdb88d513e1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3981,14 +3981,14 @@ let Predicates = [HasStdExtV, HasStdExtF] in {
 // 15.3. Vector Single-Width Floating-Point Reduction Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVFREDOSUM   : VPseudoReductionV_VS;
-defm PseudoVFREDSUM    : VPseudoReductionV_VS;
+defm PseudoVFREDUSUM   : VPseudoReductionV_VS;
 defm PseudoVFREDMIN    : VPseudoReductionV_VS;
 defm PseudoVFREDMAX    : VPseudoReductionV_VS;
 
 //===----------------------------------------------------------------------===//
 // 15.4. Vector Widening Floating-Point Reduction Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVFWREDSUM   : VPseudoReductionV_VS;
+defm PseudoVFWREDUSUM  : VPseudoReductionV_VS;
 defm PseudoVFWREDOSUM  : VPseudoReductionV_VS;
 
 } // Predicates = [HasStdExtV, HasStdExtF]
@@ -4646,14 +4646,14 @@ let Predicates = [HasStdExtV, HasStdExtF] in {
 // 15.3. Vector Single-Width Floating-Point Reduction Instructions
 //===----------------------------------------------------------------------===//
 defm : VPatReductionV_VS<"int_riscv_vfredosum", "PseudoVFREDOSUM", /*IsFloat=*/1>;
-defm : VPatReductionV_VS<"int_riscv_vfredsum", "PseudoVFREDSUM", /*IsFloat=*/1>;
+defm : VPatReductionV_VS<"int_riscv_vfredusum", "PseudoVFREDUSUM", /*IsFloat=*/1>;
 defm : VPatReductionV_VS<"int_riscv_vfredmin", "PseudoVFREDMIN", /*IsFloat=*/1>;
 defm : VPatReductionV_VS<"int_riscv_vfredmax", "PseudoVFREDMAX", /*IsFloat=*/1>;
 
 //===----------------------------------------------------------------------===//
 // 15.4. Vector Widening Floating-Point Reduction Instructions
 //===----------------------------------------------------------------------===//
-defm : VPatReductionW_VS<"int_riscv_vfwredsum", "PseudoVFWREDSUM", /*IsFloat=*/1>;
+defm : VPatReductionW_VS<"int_riscv_vfwredusum", "PseudoVFWREDUSUM", /*IsFloat=*/1>;
 defm : VPatReductionW_VS<"int_riscv_vfwredosum", "PseudoVFWREDOSUM", /*IsFloat=*/1>;
 
 } // Predicates = [HasStdExtV, HasStdExtF]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index aabbce96261d3..9656ccd36ed13 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -942,7 +942,7 @@ defm : VPatReductionVL<rvv_vecreduce_XOR_vl,  "PseudoVREDXOR", /*is_float*/0>;
 // 15.3. Vector Single-Width Floating-Point Reduction Instructions
 let Predicates = [HasStdExtV, HasStdExtF] in {
 defm : VPatReductionVL<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", /*is_float*/1>;
-defm : VPatReductionVL<rvv_vecreduce_FADD_vl,     "PseudoVFREDSUM", /*is_float*/1>;
+defm : VPatReductionVL<rvv_vecreduce_FADD_vl,     "PseudoVFREDUSUM", /*is_float*/1>;
 defm : VPatReductionVL<rvv_vecreduce_FMIN_vl,     "PseudoVFREDMIN", /*is_float*/1>;
 defm : VPatReductionVL<rvv_vecreduce_FMAX_vl,     "PseudoVFREDMAX", /*is_float*/1>;
 } // Predicates = [HasStdExtV, HasStdExtF]
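
With this change the unordered FADD reduction node selects
PseudoVFREDUSUM, which is what the reassoc reductions in the codegen
tests below exercise. In C terms, a sketch of source that can take this
path when reassociation is permitted (e.g. -O2 -ffast-math targeting a
V-enabled RISC-V; whether the vectorizer actually fires is of course
not guaranteed by this patch):

  // With reassociation allowed, the accumulation may be vectorized and
  // finished with a single unordered reduction, i.e. vfredusum.vs.
  float sum_f32(const float *x, size_t n) {
    float s = 0.0f;
    for (size_t i = 0; i < n; ++i)
      s += x[i];
    return s;
  }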

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
index 2fd09cb332608..cbd636088b765 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
@@ -12,7 +12,7 @@ define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc half @llvm.vp.reduce.fadd.v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 %evl)
@@ -40,7 +40,7 @@ define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc half @llvm.vp.reduce.fadd.v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 %evl)
@@ -68,7 +68,7 @@ define float @vpreduce_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc float @llvm.vp.reduce.fadd.v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 %evl)
@@ -96,7 +96,7 @@ define float @vpreduce_fadd_v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc float @llvm.vp.reduce.fadd.v4f32(float %s, <4 x float> %v, <4 x i1> %m, i32 %evl)
@@ -124,7 +124,7 @@ define double @vpreduce_fadd_v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc double @llvm.vp.reduce.fadd.v2f64(double %s, <2 x double> %v, <2 x i1> %m, i32 %evl)
@@ -152,7 +152,7 @@ define double @vpreduce_fadd_v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc double @llvm.vp.reduce.fadd.v4f64(double %s, <4 x double> %v, <4 x i1> %m, i32 %evl)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index b4219cd763109..bc2139c2d81d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -45,7 +45,7 @@ define half @vreduce_fadd_v2f16(<2 x half>* %x, half %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -82,7 +82,7 @@ define half @vreduce_fadd_v4f16(<4 x half>* %x, half %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -119,7 +119,7 @@ define half @vreduce_fadd_v8f16(<8 x half>* %x, half %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -156,7 +156,7 @@ define half @vreduce_fadd_v16f16(<16 x half>* %x, half %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v26, v25
+; CHECK-NEXT:    vfredusum.vs v25, v26, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -194,7 +194,7 @@ define half @vreduce_fadd_v32f16(<32 x half>* %x, half %s) {
 ; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV32-NEXT:    vfmv.v.f v25, ft0
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV32-NEXT:    vfredsum.vs v25, v28, v25
+; RV32-NEXT:    vfredusum.vs v25, v28, v25
 ; RV32-NEXT:    vfmv.f.s ft0, v25
 ; RV32-NEXT:    fadd.h fa0, fa0, ft0
 ; RV32-NEXT:    ret
@@ -209,7 +209,7 @@ define half @vreduce_fadd_v32f16(<32 x half>* %x, half %s) {
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV64-NEXT:    vfmv.v.f v25, ft0
 ; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV64-NEXT:    vfredsum.vs v25, v28, v25
+; RV64-NEXT:    vfredusum.vs v25, v28, v25
 ; RV64-NEXT:    vfmv.f.s ft0, v25
 ; RV64-NEXT:    fadd.h fa0, fa0, ft0
 ; RV64-NEXT:    ret
@@ -248,7 +248,7 @@ define half @vreduce_fadd_v64f16(<64 x half>* %x, half %s) {
 ; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV32-NEXT:    vfmv.v.f v25, ft0
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; RV32-NEXT:    vfredsum.vs v25, v8, v25
+; RV32-NEXT:    vfredusum.vs v25, v8, v25
 ; RV32-NEXT:    vfmv.f.s ft0, v25
 ; RV32-NEXT:    fadd.h fa0, fa0, ft0
 ; RV32-NEXT:    ret
@@ -263,7 +263,7 @@ define half @vreduce_fadd_v64f16(<64 x half>* %x, half %s) {
 ; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV64-NEXT:    vfmv.v.f v25, ft0
 ; RV64-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; RV64-NEXT:    vfredsum.vs v25, v8, v25
+; RV64-NEXT:    vfredusum.vs v25, v8, v25
 ; RV64-NEXT:    vfmv.f.s ft0, v25
 ; RV64-NEXT:    fadd.h fa0, fa0, ft0
 ; RV64-NEXT:    ret
@@ -305,7 +305,7 @@ define half @vreduce_fadd_v128f16(<128 x half>* %x, half %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -381,7 +381,7 @@ define float @vreduce_fadd_v2f32(<2 x float>* %x, float %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -418,7 +418,7 @@ define float @vreduce_fadd_v4f32(<4 x float>* %x, float %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -455,7 +455,7 @@ define float @vreduce_fadd_v8f32(<8 x float>* %x, float %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v26, v25
+; CHECK-NEXT:    vfredusum.vs v25, v26, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -492,7 +492,7 @@ define float @vreduce_fadd_v16f32(<16 x float>* %x, float %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v28, v25
+; CHECK-NEXT:    vfredusum.vs v25, v28, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -530,7 +530,7 @@ define float @vreduce_fadd_v32f32(<32 x float>* %x, float %s) {
 ; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV32-NEXT:    vfmv.v.f v25, ft0
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV32-NEXT:    vfredsum.vs v25, v8, v25
+; RV32-NEXT:    vfredusum.vs v25, v8, v25
 ; RV32-NEXT:    vfmv.f.s ft0, v25
 ; RV32-NEXT:    fadd.s fa0, fa0, ft0
 ; RV32-NEXT:    ret
@@ -545,7 +545,7 @@ define float @vreduce_fadd_v32f32(<32 x float>* %x, float %s) {
 ; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV64-NEXT:    vfmv.v.f v25, ft0
 ; RV64-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; RV64-NEXT:    vfredsum.vs v25, v8, v25
+; RV64-NEXT:    vfredusum.vs v25, v8, v25
 ; RV64-NEXT:    vfmv.f.s ft0, v25
 ; RV64-NEXT:    fadd.s fa0, fa0, ft0
 ; RV64-NEXT:    ret
@@ -587,7 +587,7 @@ define float @vreduce_fadd_v64f32(<64 x float>* %x, float %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -663,7 +663,7 @@ define double @vreduce_fadd_v2f64(<2 x double>* %x, double %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v26, ft0
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v25, v26
+; CHECK-NEXT:    vfredusum.vs v25, v25, v26
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -700,7 +700,7 @@ define double @vreduce_fadd_v4f64(<4 x double>* %x, double %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v26, v25
+; CHECK-NEXT:    vfredusum.vs v25, v26, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -737,7 +737,7 @@ define double @vreduce_fadd_v8f64(<8 x double>* %x, double %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v28, v25
+; CHECK-NEXT:    vfredusum.vs v25, v28, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -774,7 +774,7 @@ define double @vreduce_fadd_v16f64(<16 x double>* %x, double %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -814,7 +814,7 @@ define double @vreduce_fadd_v32f64(<32 x double>* %x, double %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
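
The hunks above only flip the expected mnemonic: the RVV v1.0 spec renamed the unordered floating-point sum reductions, and the old spellings are kept as assembler aliases. A minimal assembler sketch of the relationship (registers and vtype are illustrative, not taken from any test in this diff):

  vsetvli zero, a0, e32, m1, tu, mu
  vfredusum.vs  v8, v9, v10   # v1.0 spelling of the unordered sum reduction
  vfredsum.vs   v8, v9, v10   # old spelling, accepted as an alias
  vfwredusum.vs v8, v9, v10   # v1.0 spelling of the widening variant
  vfwredsum.vs  v8, v9, v10   # old spelling, alias for vfwredusum.vs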

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
deleted file mode 100644
index ec499aee244d7..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
+++ /dev/null
@@ -1,692 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    i32 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
deleted file mode 100644
index abb89b5ae45e3..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
+++ /dev/null
@@ -1,692 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
-  <vscale x 4 x half>,
-  <vscale x 1 x half>,
-  <vscale x 4 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
-    <vscale x 4 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
-  <vscale x 4 x half>,
-  <vscale x 2 x half>,
-  <vscale x 4 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
-    <vscale x 4 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
-  <vscale x 4 x half>,
-  <vscale x 8 x half>,
-  <vscale x 4 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
-    <vscale x 4 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
-  <vscale x 4 x half>,
-  <vscale x 16 x half>,
-  <vscale x 4 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
-    <vscale x 4 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
-  <vscale x 4 x half>,
-  <vscale x 32 x half>,
-  <vscale x 4 x half>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
-    <vscale x 4 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 4 x half> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x float>,
-  <vscale x 2 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x float>,
-  <vscale x 2 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x float>,
-  <vscale x 2 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x float>,
-  <vscale x 2 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x double>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x double>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x double>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vfredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
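
The rv32/rv64 test pairs deleted above and below exercised the pre-rename intrinsic names; the LLVM intrinsics were renamed along with the mnemonics (llvm.riscv.vfredsum to llvm.riscv.vfredusum, llvm.riscv.vfwredsum to llvm.riscv.vfwredusum). A minimal IR sketch of a post-rename call, mirroring the declaration style of the deleted rv64 tests above (types and value names are illustrative, not copied from any particular test):

  declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
    <vscale x 2 x float>,   ; destination / maskedoff operand
    <vscale x 2 x float>,   ; vector operand being reduced
    <vscale x 2 x float>,   ; scalar operand held in element 0
    i64)                    ; vector length

  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
    <vscale x 2 x float> %dst, <vscale x 2 x float> %vec,
    <vscale x 2 x float> %scalar, i64 %vl)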

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
deleted file mode 100644
index 228ff8b8af1fd..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
+++ /dev/null
@@ -1,508 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
-  <vscale x 2 x float>,
-  <vscale x 1 x half>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x half>,
-  <vscale x 2 x float>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
-  <vscale x 2 x float>,
-  <vscale x 4 x half>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x half>,
-  <vscale x 2 x float>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
-  <vscale x 2 x float>,
-  <vscale x 8 x half>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x half>,
-  <vscale x 2 x float>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
-  <vscale x 2 x float>,
-  <vscale x 16 x half>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x half>,
-  <vscale x 2 x float>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
-  <vscale x 2 x float>,
-  <vscale x 32 x half>,
-  <vscale x 2 x float>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
-    <vscale x 2 x float> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 2 x float> %2,
-    i32 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
-  <vscale x 2 x float>,
-  <vscale x 32 x half>,
-  <vscale x 2 x float>,
-  <vscale x 32 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
-    <vscale x 2 x float> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
-  <vscale x 1 x double>,
-  <vscale x 2 x float>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x float>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
-  <vscale x 1 x double>,
-  <vscale x 4 x float>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x float>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
-  <vscale x 1 x double>,
-  <vscale x 8 x float>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x float>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
-  <vscale x 1 x double>,
-  <vscale x 16 x float>,
-  <vscale x 1 x double>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
-    <vscale x 1 x double> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 1 x double> %2,
-    i32 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 16 x float>,
-  <vscale x 1 x double>,
-  <vscale x 16 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
deleted file mode 100644
index 7ed535ed89490..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
+++ /dev/null
@@ -1,508 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
-  <vscale x 2 x float>,
-  <vscale x 1 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv1f16(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 1 x half>,
-  <vscale x 2 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv1f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv2f16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x half>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv2f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
-  <vscale x 2 x float>,
-  <vscale x 4 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv4f16(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 4 x half>,
-  <vscale x 2 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv4f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
-  <vscale x 2 x float>,
-  <vscale x 8 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv8f16(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 8 x half>,
-  <vscale x 2 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv8f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
-  <vscale x 2 x float>,
-  <vscale x 16 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv16f16(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 16 x half>,
-  <vscale x 2 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv16f16.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
-  <vscale x 2 x float>,
-  <vscale x 32 x half>,
-  <vscale x 2 x float>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.nxv2f32.nxv32f16(
-    <vscale x 2 x float> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 2 x float> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
-  <vscale x 2 x float>,
-  <vscale x 32 x half>,
-  <vscale x 2 x float>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
-    <vscale x 2 x float> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 2 x float> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv1f32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x float>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv1f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
-  <vscale x 1 x double>,
-  <vscale x 2 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv2f32(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 2 x float>,
-  <vscale x 1 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv2f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
-  <vscale x 1 x double>,
-  <vscale x 4 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv4f32(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 4 x float>,
-  <vscale x 1 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv4f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
-  <vscale x 1 x double>,
-  <vscale x 8 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv8f32(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 8 x float>,
-  <vscale x 1 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v12, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv8f32.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
-  <vscale x 1 x double>,
-  <vscale x 16 x float>,
-  <vscale x 1 x double>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.nxv1f64.nxv16f32(
-    <vscale x 1 x double> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 1 x double> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
-  <vscale x 1 x double>,
-  <vscale x 16 x float>,
-  <vscale x 1 x double>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vfwredsum.vs v8, v16, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
-    <vscale x 1 x double> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 1 x double> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret <vscale x 1 x double> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
index 9c0199ba621c3..07b224f38f155 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -14,7 +14,7 @@ define half @vreduce_fadd_nxv1f16(<vscale x 1 x half> %v, half %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -45,7 +45,7 @@ define half @vreduce_fadd_nxv2f16(<vscale x 2 x half> %v, half %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -75,7 +75,7 @@ define half @vreduce_fadd_nxv4f16(<vscale x 4 x half> %v, half %s) {
 ; CHECK-NEXT:    flh ft0, %lo(.LCPI4_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.h fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -105,7 +105,7 @@ define float @vreduce_fadd_nxv1f32(<vscale x 1 x float> %v, float %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -135,7 +135,7 @@ define float @vreduce_fadd_nxv2f32(<vscale x 2 x float> %v, float %s) {
 ; CHECK-NEXT:    flw ft0, %lo(.LCPI8_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -165,7 +165,7 @@ define float @vreduce_fadd_nxv4f32(<vscale x 4 x float> %v, float %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.s fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -195,7 +195,7 @@ define double @vreduce_fadd_nxv1f64(<vscale x 1 x double> %v, double %s) {
 ; CHECK-NEXT:    fld ft0, %lo(.LCPI12_0)(a0)
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -225,7 +225,7 @@ define double @vreduce_fadd_nxv2f64(<vscale x 2 x double> %v, double %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret
@@ -256,7 +256,7 @@ define double @vreduce_fadd_nxv4f64(<vscale x 4 x double> %v, double %s) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, ft0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25
 ; CHECK-NEXT:    vfmv.f.s ft0, v25
 ; CHECK-NEXT:    fadd.d fa0, fa0, ft0
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
index 195f3121ebfd4..588c97231504d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
@@ -12,7 +12,7 @@ define half @vpreduce_fadd_nxv1f16(half %s, <vscale x 1 x half> %v, <vscale x 1
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc half @llvm.vp.reduce.fadd.nxv1f16(half %s, <vscale x 1 x half> %v, <vscale x 1 x i1> %m, i32 %evl)
@@ -40,7 +40,7 @@ define half @vpreduce_fadd_nxv2f16(half %s, <vscale x 2 x half> %v, <vscale x 2
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc half @llvm.vp.reduce.fadd.nxv2f16(half %s, <vscale x 2 x half> %v, <vscale x 2 x i1> %m, i32 %evl)
@@ -68,7 +68,7 @@ define half @vpreduce_fadd_nxv4f16(half %s, <vscale x 4 x half> %v, <vscale x 4
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc half @llvm.vp.reduce.fadd.nxv4f16(half %s, <vscale x 4 x half> %v, <vscale x 4 x i1> %m, i32 %evl)
@@ -96,7 +96,7 @@ define float @vpreduce_fadd_nxv1f32(float %s, <vscale x 1 x float> %v, <vscale x
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc float @llvm.vp.reduce.fadd.nxv1f32(float %s, <vscale x 1 x float> %v, <vscale x 1 x i1> %m, i32 %evl)
@@ -124,7 +124,7 @@ define float @vpreduce_fadd_nxv2f32(float %s, <vscale x 2 x float> %v, <vscale x
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc float @llvm.vp.reduce.fadd.nxv2f32(float %s, <vscale x 2 x float> %v, <vscale x 2 x i1> %m, i32 %evl)
@@ -152,7 +152,7 @@ define float @vpreduce_fadd_nxv4f32(float %s, <vscale x 4 x float> %v, <vscale x
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc float @llvm.vp.reduce.fadd.nxv4f32(float %s, <vscale x 4 x float> %v, <vscale x 4 x i1> %m, i32 %evl)
@@ -180,7 +180,7 @@ define double @vpreduce_fadd_nxv1f64(double %s, <vscale x 1 x double> %v, <vscal
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc double @llvm.vp.reduce.fadd.nxv1f64(double %s, <vscale x 1 x double> %v, <vscale x 1 x i1> %m, i32 %evl)
@@ -208,7 +208,7 @@ define double @vpreduce_fadd_nxv2f64(double %s, <vscale x 2 x double> %v, <vscal
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc double @llvm.vp.reduce.fadd.nxv2f64(double %s, <vscale x 2 x double> %v, <vscale x 2 x i1> %m, i32 %evl)
@@ -236,7 +236,7 @@ define double @vpreduce_fadd_nxv4f64(double %s, <vscale x 4 x double> %v, <vscal
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
 ; CHECK-NEXT:    vfmv.v.f v25, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vfredsum.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    vfredusum.vs v25, v8, v25, v0.t
 ; CHECK-NEXT:    vfmv.f.s fa0, v25
 ; CHECK-NEXT:    ret
   %r = call reassoc double @llvm.vp.reduce.fadd.nxv4f64(double %s, <vscale x 4 x double> %v, <vscale x 4 x i1> %m, i32 %evl)

diff  --git a/llvm/test/MC/RISCV/rvv/aliases.s b/llvm/test/MC/RISCV/rvv/aliases.s
index d6fc622de2c3e..ee0de452ab6b7 100644
--- a/llvm/test/MC/RISCV/rvv/aliases.s
+++ b/llvm/test/MC/RISCV/rvv/aliases.s
@@ -84,3 +84,9 @@ vle1.v v8, (a0)
 # ALIAS:    vsm.v           v8, (a0)         # encoding: [0x27,0x04,0xb5,0x02]
 # NO-ALIAS: vsm.v           v8, (a0)         # encoding: [0x27,0x04,0xb5,0x02]
 vse1.v v8, (a0)
+# ALIAS:    vfredusum.vs v8, v4, v20, v0.t    # encoding: [0x57,0x14,0x4a,0x04]
+# NO-ALIAS: vfredusum.vs v8, v4, v20, v0.t   # encoding: [0x57,0x14,0x4a,0x04]
+vfredsum.vs v8, v4, v20, v0.t
+# ALIAS:    vfwredusum.vs v8, v4, v20, v0.t   # encoding: [0x57,0x14,0x4a,0xc4]
+# NO-ALIAS: vfwredusum.vs v8, v4, v20, v0.t  # encoding: [0x57,0x14,0x4a,0xc4]
+vfwredsum.vs v8, v4, v20, v0.t

diff  --git a/llvm/test/MC/RISCV/rvv/freduction.s b/llvm/test/MC/RISCV/rvv/freduction.s
index 4ecc9095f6239..c477dfefa487b 100644
--- a/llvm/test/MC/RISCV/rvv/freduction.s
+++ b/llvm/test/MC/RISCV/rvv/freduction.s
@@ -1,12 +1,12 @@
 # RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
-# RUN:         --mattr=+f \
+# RUN:         --mattr=+f --riscv-no-aliases \
 # RUN:        | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
 # RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
 # RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
 # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
 # RUN:         --mattr=+f \
-# RUN:        | llvm-objdump -d --mattr=+experimental-v --mattr=+f - \
-# RUN:        | FileCheck %s --check-prefix=CHECK-INST
+# RUN:        | llvm-objdump -d --mattr=+experimental-v --mattr=+f \
+# RUN:        -M no-aliases - | FileCheck %s --check-prefix=CHECK-INST
 # RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
 # RUN:         --mattr=+f \
 # RUN:        | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
@@ -23,14 +23,14 @@ vfredosum.vs v8, v4, v20
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a 0e <unknown>
 
-vfredsum.vs v8, v4, v20, v0.t
-# CHECK-INST: vfredsum.vs v8, v4, v20, v0.t
+vfredusum.vs v8, v4, v20, v0.t
+# CHECK-INST: vfredusum.vs v8, v4, v20, v0.t
 # CHECK-ENCODING: [0x57,0x14,0x4a,0x04]
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a 04 <unknown>
 
-vfredsum.vs v8, v4, v20
-# CHECK-INST: vfredsum.vs v8, v4, v20
+vfredusum.vs v8, v4, v20
+# CHECK-INST: vfredusum.vs v8, v4, v20
 # CHECK-ENCODING: [0x57,0x14,0x4a,0x06]
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a 06 <unknown>
@@ -71,14 +71,14 @@ vfwredosum.vs v8, v4, v20
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a ce <unknown>
 
-vfwredsum.vs v8, v4, v20, v0.t
-# CHECK-INST: vfwredsum.vs v8, v4, v20, v0.t
+vfwredusum.vs v8, v4, v20, v0.t
+# CHECK-INST: vfwredusum.vs v8, v4, v20, v0.t
 # CHECK-ENCODING: [0x57,0x14,0x4a,0xc4]
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a c4 <unknown>
 
-vfwredsum.vs v8, v4, v20
-# CHECK-INST: vfwredsum.vs v8, v4, v20
+vfwredusum.vs v8, v4, v20
+# CHECK-INST: vfwredusum.vs v8, v4, v20
 # CHECK-ENCODING: [0x57,0x14,0x4a,0xc6]
 # CHECK-ERROR: instruction requires the following: 'F'{{.*}}'V'
 # CHECK-UNKNOWN: 57 14 4a c6 <unknown>


        

