[llvm-branch-commits] [llvm] 253dc16 - [RISCV] Cleanup some V intrinsic names used in tests to match the type overloads used. Add some missing double tests on rv32. NFC

Craig Topper via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Wed Dec 30 12:42:35 PST 2020


Author: Craig Topper
Date: 2020-12-30T12:37:11-08:00
New Revision: 253dc16f9eb0701305272dc5a438e1f766b4ad60

URL: https://github.com/llvm/llvm-project/commit/253dc16f9eb0701305272dc5a438e1f766b4ad60
DIFF: https://github.com/llvm/llvm-project/commit/253dc16f9eb0701305272dc5a438e1f766b4ad60.diff

LOG: [RISCV] Cleanup some V intrinsic names used in tests to match the type overloads used. Add some missing double tests on rv32. NFC

The matching for intrinsic names is forgiving about types in the
name being absent or wrong. Once the intrinsic is parsed, its
name will be remangled to include the real types.

This commit fixes the names to have at least enough correct types
so that the name used in the test is a prefix of the canonical name.
The big missing part is the type of the VL parameter, which changes
size between rv32 and rv64.
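
As an example of the kind of rename involved (taken from the
vfirst-rv32.ll changes below), the rv32 test previously declared

  declare i32 @llvm.riscv.vfirst.i64.nxv1i1(<vscale x 1 x i1>, i32)

and now declares

  declare i32 @llvm.riscv.vfirst.i32.nxv1i1(<vscale x 1 x i1>, i32)

so the i32 in the name matches the types actually used on rv32. The
old i64 spelling only worked because the parser remangled the name
after parsing.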

While I was in here, I noticed that we were missing some tests for
double on rv32, so I fixed that by copying them from rv64 and fixing up
the VL argument type.

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
index 1acdc7f06e3c..5428dbcc9f18 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll
@@ -1,32 +1,32 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare i32 @llvm.riscv.vfirst.i64.nxv1i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv1i1(
   <vscale x 1 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv1i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv1i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv1i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv1i1(
     <vscale x 1 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv1i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv1i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv1i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv1i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
     i32 %2)
@@ -34,33 +34,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv2i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv2i1(
   <vscale x 2 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv2i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv2i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv2i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv2i1(
     <vscale x 2 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv2i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv2i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv2i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv2i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
     i32 %2)
@@ -68,33 +68,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv4i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv4i1(
   <vscale x 4 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv4i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv4i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv4i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv4i1(
     <vscale x 4 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv4i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv4i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv4i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv4i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
     i32 %2)
@@ -102,33 +102,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv8i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv8i1(
   <vscale x 8 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv8i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv8i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv8i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv8i1(
     <vscale x 8 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv8i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv8i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv8i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv8i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
     i32 %2)
@@ -136,33 +136,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv16i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv16i1(
   <vscale x 16 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv16i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv16i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv16i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv16i1(
     <vscale x 16 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv16i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv16i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv16i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv16i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
     i32 %2)
@@ -170,33 +170,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv32i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv32i1(
   <vscale x 32 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv32i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv32i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv32i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv32i1(
     <vscale x 32 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv32i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv32i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv32i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv32i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
     i32 %2)
@@ -204,33 +204,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.i64.nxv64i1(
+declare i32 @llvm.riscv.vfirst.i32.nxv64i1(
   <vscale x 64 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_m_i64_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vfirst_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv64i1
+; CHECK-LABEL: intrinsic_vfirst_m_i32_nxv64i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vfirst.i64.nxv64i1(
+  %a = call i32 @llvm.riscv.vfirst.i32.nxv64i1(
     <vscale x 64 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vfirst.mask.i64.nxv64i1(
+declare i32 @llvm.riscv.vfirst.mask.i32.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
   i32);
 
-define i32 @intrinsic_vfirst_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vfirst_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv64i1
+; CHECK-LABEL: intrinsic_vfirst_mask_m_i32_nxv64i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
 ; CHECK:       vfirst.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vfirst.mask.i64.nxv64i1(
+  %a = call i32 @llvm.riscv.vfirst.mask.i32.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
     i32 %2)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
index 521c6b4ccbf9..b3a095d34572 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
@@ -1,419 +1,419 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -target-abi ilp32d -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
   half,
   i32);
 
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half %0,
     i32 %1)
 
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
   half,
   i32);
 
-define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half %0,
     i32 %1)
 
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
   half,
   i32);
 
-define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half %0,
     i32 %1)
 
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
   half,
   i32);
 
-define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half %0,
     i32 %1)
 
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
   half,
   i32);
 
-define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half %0,
     i32 %1)
 
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
   half,
   i32);
 
-define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16_f16(half %0, i32 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half %0,
     i32 %1)
 
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
   float,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32_f32(float %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float %0,
     i32 %1)
 
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
+declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
   float,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32_f32(float %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float %0,
     i32 %1)
 
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
+declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
   float,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32_f32(float %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float %0,
     i32 %1)
 
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
+declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
   float,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32_f32(float %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float %0,
     i32 %1)
 
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
+declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
   float,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32_f32(float %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float %0,
     i32 %1)
 
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
+declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
   double,
   i32);
 
-define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64_f64(double %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m1,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double %0,
     i32 %1)
 
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
+declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
   double,
   i32);
 
-define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64_f64(double %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m2,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double %0,
     i32 %1)
 
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
+declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
   double,
   i32);
 
-define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64_f64(double %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m4,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double %0,
     i32 %1)
 
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
+declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
   double,
   i32);
 
-define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64_f64(double %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m8,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double %0,
     i32 %1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16_f16(i32 %0) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vmv.v.x_zero_nxv2f16_f16(i32 %0) nounwind {
+define <vscale x 2 x half> @intrinsic_vmv.v.x_zero_nxv2f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vmv.v.x_zero_nxv4f16_f16(i32 %0) nounwind {
+define <vscale x 4 x half> @intrinsic_vmv.v.x_zero_nxv4f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vmv.v.x_zero_nxv8f16_f16(i32 %0) nounwind {
+define <vscale x 8 x half> @intrinsic_vmv.v.x_zero_nxv8f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vmv.v.x_zero_nxv16f16_f16(i32 %0) nounwind {
+define <vscale x 16 x half> @intrinsic_vmv.v.x_zero_nxv16f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 32 x half> @intrinsic_vmv.v.x_zero_nxv32f16_f16(i32 %0) nounwind {
+define <vscale x 32 x half> @intrinsic_vmv.v.x_zero_nxv32f16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half 0.0,
     i32 %0)
 
   ret <vscale x 32 x half> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vmv.v.x_zero_nxv1f32_f32(i32 %0) nounwind {
+define <vscale x 1 x float> @intrinsic_vmv.v.x_zero_nxv1f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float 0.0,
     i32 %0)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vmv.v.x_zero_nxv2f32_f32(i32 %0) nounwind {
+define <vscale x 2 x float> @intrinsic_vmv.v.x_zero_nxv2f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float 0.0,
     i32 %0)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vmv.v.x_zero_nxv4f32_f32(i32 %0) nounwind {
+define <vscale x 4 x float> @intrinsic_vmv.v.x_zero_nxv4f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float 0.0,
     i32 %0)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vmv.v.x_zero_nxv8f32_f32(i32 %0) nounwind {
+define <vscale x 8 x float> @intrinsic_vmv.v.x_zero_nxv8f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float 0.0,
     i32 %0)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vmv.v.x_zero_nxv16f32_f32(i32 %0) nounwind {
+define <vscale x 16 x float> @intrinsic_vmv.v.x_zero_nxv16f32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float 0.0,
     i32 %0)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vmv.v.x_zero_nxv1f64_f64(i32 %0) nounwind {
+define <vscale x 1 x double> @intrinsic_vmv.v.x_zero_nxv1f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m1,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double 0.0,
     i32 %0)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vmv.v.x_zero_nxv2f64_f64(i32 %0) nounwind {
+define <vscale x 2 x double> @intrinsic_vmv.v.x_zero_nxv2f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m2,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double 0.0,
     i32 %0)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vmv.v.x_zero_nxv4f64_f64(i32 %0) nounwind {
+define <vscale x 4 x double> @intrinsic_vmv.v.x_zero_nxv4f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m4,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double 0.0,
     i32 %0)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vmv.v.x_zero_nxv8f64_f64(i32 %0) nounwind {
+define <vscale x 8 x double> @intrinsic_vmv.v.x_zero_nxv8f64(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m8,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double 0.0,
     i32 %0)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
index 525fa0927526..f781c1f0397b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll
@@ -1,419 +1,419 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -target-abi lp64d -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
   half,
   i64);
 
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16_f16(half %0, i64 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16(half %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half %0,
     i64 %1)
 
   ret <vscale x 1 x half> %a
 }
 
-declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
   half,
   i64);
 
-define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16_f16(half %0, i64 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16(half %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half %0,
     i64 %1)
 
   ret <vscale x 2 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
   half,
   i64);
 
-define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16_f16(half %0, i64 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16(half %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half %0,
     i64 %1)
 
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
   half,
   i64);
 
-define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16_f16(half %0, i64 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16(half %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half %0,
     i64 %1)
 
   ret <vscale x 8 x half> %a
 }
 
-declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
   half,
   i64);
 
-define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16_f16(half %0, i64 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16(half %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half %0,
     i64 %1)
 
   ret <vscale x 16 x half> %a
 }
 
-declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
   half,
   i64);
 
-define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16_f16(half %0, i64 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16(half %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half %0,
     i64 %1)
 
   ret <vscale x 32 x half> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
   float,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32_f32(float %0, i64 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32(float %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float %0,
     i64 %1)
 
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
+declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
   float,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32_f32(float %0, i64 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vfmv.v.f_f_nxv2f32(float %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float %0,
     i64 %1)
 
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
+declare <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
   float,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32_f32(float %0, i64 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vfmv.v.f_f_nxv4f32(float %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float %0,
     i64 %1)
 
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
+declare <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
   float,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32_f32(float %0, i64 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vfmv.v.f_f_nxv8f32(float %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float %0,
     i64 %1)
 
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
+declare <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
   float,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32_f32(float %0, i64 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vfmv.v.f_f_nxv16f32(float %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32_f32
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float %0,
     i64 %1)
 
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
+declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
   double,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64_f64(double %0, i64 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vfmv.v.f_f_nxv1f64(double %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m1,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double %0,
     i64 %1)
 
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
+declare <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
   double,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64_f64(double %0, i64 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vfmv.v.f_f_nxv2f64(double %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m2,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double %0,
     i64 %1)
 
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
+declare <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
   double,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64_f64(double %0, i64 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vfmv.v.f_f_nxv4f64(double %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m4,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double %0,
     i64 %1)
 
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
+declare <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
   double,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64_f64(double %0, i64 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vfmv.v.f_f_nxv8f64(double %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64_f64
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m8,ta,mu
 ; CHECK:       vfmv.v.f {{v[0-9]+}}, fa0
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double %0,
     i64 %1)
 
   ret <vscale x 8 x double> %a
 }
 
-define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16_f16(i64 %0) nounwind {
+define <vscale x 1 x half> @intrinsic_vfmv.v.f_zero_nxv1f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf4,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(
     half 0.0,
     i64 %0)
 
   ret <vscale x 1 x half> %a
 }
 
-define <vscale x 2 x half> @intrinsic_vmv.v.x_zero_nxv2f16_f16(i64 %0) nounwind {
+define <vscale x 2 x half> @intrinsic_vmv.v.x_zero_nxv2f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf2,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16(
     half 0.0,
     i64 %0)
 
   ret <vscale x 2 x half> %a
 }
 
-define <vscale x 4 x half> @intrinsic_vmv.v.x_zero_nxv4f16_f16(i64 %0) nounwind {
+define <vscale x 4 x half> @intrinsic_vmv.v.x_zero_nxv4f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m1,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16(
     half 0.0,
     i64 %0)
 
   ret <vscale x 4 x half> %a
 }
 
-define <vscale x 8 x half> @intrinsic_vmv.v.x_zero_nxv8f16_f16(i64 %0) nounwind {
+define <vscale x 8 x half> @intrinsic_vmv.v.x_zero_nxv8f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m2,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16(
     half 0.0,
     i64 %0)
 
   ret <vscale x 8 x half> %a
 }
 
-define <vscale x 16 x half> @intrinsic_vmv.v.x_zero_nxv16f16_f16(i64 %0) nounwind {
+define <vscale x 16 x half> @intrinsic_vmv.v.x_zero_nxv16f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m4,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16(
     half 0.0,
     i64 %0)
 
   ret <vscale x 16 x half> %a
 }
 
-define <vscale x 32 x half> @intrinsic_vmv.v.x_zero_nxv32f16_f16(i64 %0) nounwind {
+define <vscale x 32 x half> @intrinsic_vmv.v.x_zero_nxv32f16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16_f16
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m8,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16(
     half 0.0,
     i64 %0)
 
   ret <vscale x 32 x half> %a
 }
 
-define <vscale x 1 x float> @intrinsic_vmv.v.x_zero_nxv1f32_f32(i64 %0) nounwind {
+define <vscale x 1 x float> @intrinsic_vmv.v.x_zero_nxv1f32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,mf2,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(
     float 0.0,
     i64 %0)
 
   ret <vscale x 1 x float> %a
 }
 
-define <vscale x 2 x float> @intrinsic_vmv.v.x_zero_nxv2f32_f32(i64 %0) nounwind {
+define <vscale x 2 x float> @intrinsic_vmv.v.x_zero_nxv2f32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m1,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32(
     float 0.0,
     i64 %0)
 
   ret <vscale x 2 x float> %a
 }
 
-define <vscale x 4 x float> @intrinsic_vmv.v.x_zero_nxv4f32_f32(i64 %0) nounwind {
+define <vscale x 4 x float> @intrinsic_vmv.v.x_zero_nxv4f32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m2,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32.f32(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.v.f.nxv4f32(
     float 0.0,
     i64 %0)
 
   ret <vscale x 4 x float> %a
 }
 
-define <vscale x 8 x float> @intrinsic_vmv.v.x_zero_nxv8f32_f32(i64 %0) nounwind {
+define <vscale x 8 x float> @intrinsic_vmv.v.x_zero_nxv8f32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m4,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32.f32(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.v.f.nxv8f32(
     float 0.0,
     i64 %0)
 
   ret <vscale x 8 x float> %a
 }
 
-define <vscale x 16 x float> @intrinsic_vmv.v.x_zero_nxv16f32_f32(i64 %0) nounwind {
+define <vscale x 16 x float> @intrinsic_vmv.v.x_zero_nxv16f32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32_f32
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m8,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32.f32(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.v.f.nxv16f32(
     float 0.0,
     i64 %0)
 
   ret <vscale x 16 x float> %a
 }
 
-define <vscale x 1 x double> @intrinsic_vmv.v.x_zero_nxv1f64_f64(i64 %0) nounwind {
+define <vscale x 1 x double> @intrinsic_vmv.v.x_zero_nxv1f64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m1,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64.f64(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(
     double 0.0,
     i64 %0)
 
   ret <vscale x 1 x double> %a
 }
 
-define <vscale x 2 x double> @intrinsic_vmv.v.x_zero_nxv2f64_f64(i64 %0) nounwind {
+define <vscale x 2 x double> @intrinsic_vmv.v.x_zero_nxv2f64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m2,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64.f64(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.v.f.nxv2f64(
     double 0.0,
     i64 %0)
 
   ret <vscale x 2 x double> %a
 }
 
-define <vscale x 4 x double> @intrinsic_vmv.v.x_zero_nxv4f64_f64(i64 %0) nounwind {
+define <vscale x 4 x double> @intrinsic_vmv.v.x_zero_nxv4f64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m4,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64.f64(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.v.f.nxv4f64(
     double 0.0,
     i64 %0)
 
   ret <vscale x 4 x double> %a
 }
 
-define <vscale x 8 x double> @intrinsic_vmv.v.x_zero_nxv8f64_f64(i64 %0) nounwind {
+define <vscale x 8 x double> @intrinsic_vmv.v.x_zero_nxv8f64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64_f64
+; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m8,ta,mu
 ; CHECK:       vmv.v.x {{v[0-9]+}}, zero
-  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64.f64(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.v.f.nxv8f64(
     double 0.0,
     i64 %0)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
index 6d2fa56686f8..6be0bf7e9206 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
index 2977a633a4a3..dd02b5237511 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmax-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64(
   <vscale x 1 x double>,
   <vscale x 2 x double>,
   <vscale x 1 x double>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64(
   <vscale x 1 x double>,
   <vscale x 4 x double>,
   <vscale x 1 x double>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64(
   <vscale x 1 x double>,
   <vscale x 8 x double>,
   <vscale x 1 x double>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vfredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
index fefa2e5de06a..4445dfd8057e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
index 60da063f5698..4173a465c7b5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredmin-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.nxv1i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.nxv2i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.nxv4i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.nxv8i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.nxv16i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.nxv32i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.nxv1i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.nxv2i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.nxv4i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.nxv8i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.nxv16i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.nxv1i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64(
   <vscale x 1 x double>,
   <vscale x 2 x double>,
   <vscale x 1 x double>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.nxv2i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64(
   <vscale x 1 x double>,
   <vscale x 4 x double>,
   <vscale x 1 x double>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.nxv4i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64(
   <vscale x 1 x double>,
   <vscale x 8 x double>,
   <vscale x 1 x double>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vfredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.nxv8i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
index e50e92a6bd20..b3d86138d2c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
index 948ad9f18da1..c7e7f5b2463e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredosum-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.nxv1i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.nxv2i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.nxv4i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.nxv8i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.nxv16i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.nxv32i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.nxv1i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.nxv2i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.nxv4i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.nxv8i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.nxv16i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.nxv1i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64(
   <vscale x 1 x double>,
   <vscale x 2 x double>,
   <vscale x 1 x double>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.nxv2i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64(
   <vscale x 1 x double>,
   <vscale x 4 x double>,
   <vscale x 1 x double>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.nxv4i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64(
   <vscale x 1 x double>,
   <vscale x 8 x double>,
   <vscale x 1 x double>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vfredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.nxv8i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
index 87393c291ca8..6747a6655612 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
index 7000191c9a64..6079d7689927 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfredsum-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
   <vscale x 4 x half>,
   <vscale x 1 x half>,
   <vscale x 4 x half>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16.nxv1i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv1f16(
     <vscale x 4 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 4 x half> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
   <vscale x 4 x half>,
   <vscale x 2 x half>,
   <vscale x 4 x half>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16.nxv2i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv2f16(
     <vscale x 4 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 4 x half> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16.nxv4i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
   <vscale x 4 x half>,
   <vscale x 8 x half>,
   <vscale x 4 x half>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16.nxv8i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv8f16(
     <vscale x 4 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 4 x half> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
   <vscale x 4 x half>,
   <vscale x 16 x half>,
   <vscale x 4 x half>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16.nxv16i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv16f16(
     <vscale x 4 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 4 x half> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 4 x half> %a
 }
 
-declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
+declare <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
   <vscale x 4 x half>,
   <vscale x 32 x half>,
   <vscale x 4 x half>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16.nxv32i1(
+  %a = call <vscale x 4 x half> @llvm.riscv.vfredsum.mask.nxv4f16.nxv32f16(
     <vscale x 4 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 4 x half> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
   <vscale x 2 x float>,
   <vscale x 1 x float>,
   <vscale x 2 x float>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32.nxv1i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv1f32(
     <vscale x 2 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 2 x float> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32.nxv2i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
   <vscale x 2 x float>,
   <vscale x 4 x float>,
   <vscale x 2 x float>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32.nxv4i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv4f32(
     <vscale x 2 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 2 x float> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
   <vscale x 2 x float>,
   <vscale x 8 x float>,
   <vscale x 2 x float>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32.nxv8i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv8f32(
     <vscale x 2 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 2 x float> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
+declare <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
   <vscale x 2 x float>,
   <vscale x 16 x float>,
   <vscale x 2 x float>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32.nxv16i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfredsum.mask.nxv2f32.nxv16f32(
     <vscale x 2 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 2 x float> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x double>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64.nxv1i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64(
   <vscale x 1 x double>,
   <vscale x 2 x double>,
   <vscale x 1 x double>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64.nxv2i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv2f64(
     <vscale x 1 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 1 x double> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64(
   <vscale x 1 x double>,
   <vscale x 4 x double>,
   <vscale x 1 x double>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64.nxv4i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv4f64(
     <vscale x 1 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 1 x double> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
+declare <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64(
   <vscale x 1 x double>,
   <vscale x 8 x double>,
   <vscale x 1 x double>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vfredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64.nxv8i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfredsum.mask.nxv1f64.nxv8f64(
     <vscale x 1 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 1 x double> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
index 943f2009181c..e535188f38b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
@@ -1,16 +1,16 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +80,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -98,19 +98,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +120,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -138,19 +138,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +160,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -178,19 +178,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +200,177 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16.f16(
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
     i32 %2)
@@ -218,19 +378,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     half %2,
@@ -240,17 +400,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
     i32 %2)
@@ -258,19 +418,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     half %2,
@@ -280,17 +440,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
     i32 %2)
@@ -298,19 +458,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     half %2,
@@ -320,17 +480,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
     i32 %2)
@@ -338,19 +498,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     half %2,
@@ -360,17 +520,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
     i32 %2)
@@ -378,19 +538,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     half %2,
@@ -399,3 +559,163 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    float %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    float %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    float %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    float %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
index 3e7fb6474b30..9f926fdb219c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
@@ -1,16 +1,16 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i64 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i64 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +80,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i64 %2)
@@ -98,19 +98,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +120,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i64 %2)
@@ -138,19 +138,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +160,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i64 %2)
@@ -178,19 +178,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +200,17 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwadd_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f32
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f64_nxv1f32_nxv1f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i64 %2)
@@ -218,19 +218,19 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f64_nxv1f32_nxv1f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
@@ -240,17 +240,17 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwadd_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f32
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f64_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i64 %2)
@@ -258,19 +258,19 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f64_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -280,17 +280,17 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwadd_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f32
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f64_nxv4f32_nxv4f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i64 %2)
@@ -298,19 +298,19 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f64_nxv4f32_nxv4f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
@@ -320,17 +320,17 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwadd_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f32
+; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i64 %2)
@@ -338,19 +338,19 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32
+; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
@@ -360,17 +360,17 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16.f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
     i64 %2)
@@ -378,19 +378,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     half %2,
@@ -400,17 +400,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
     i64 %2)
@@ -418,19 +418,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     half %2,
@@ -440,17 +440,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
     i64 %2)
@@ -458,19 +458,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     half %2,
@@ -480,17 +480,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
     i64 %2)
@@ -498,19 +498,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     half %2,
@@ -520,17 +520,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
     i64 %2)
@@ -538,19 +538,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     half %2,
@@ -560,17 +560,17 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f32.f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwadd_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f64_nxv1f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f32.f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
     i64 %2)
@@ -578,19 +578,19 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32.f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f64_nxv1f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32.f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     float %2,
@@ -600,17 +600,17 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f32.f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwadd_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f64_nxv2f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f32.f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
     i64 %2)
@@ -618,19 +618,19 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32.f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f64_nxv2f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32.f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     float %2,
@@ -640,17 +640,17 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f32.f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwadd_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f64_nxv4f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f32.f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
     i64 %2)
@@ -658,19 +658,19 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32.f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f64_nxv4f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32.f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     float %2,
@@ -680,17 +680,17 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f32.f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwadd_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f64_nxv8f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f32.f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
     i64 %2)
@@ -698,19 +698,19 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32.f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f64_nxv8f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32.f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     float %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
index fae0b94a6c0b..ff391c882fc9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
@@ -1,16 +1,16 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +80,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -98,19 +98,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +120,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -138,19 +138,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +160,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -178,19 +178,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x half> %2,
@@ -200,14 +200,174 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
@@ -225,9 +385,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
@@ -245,9 +405,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
   half,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
@@ -265,9 +425,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
@@ -285,9 +445,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
   half,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
@@ -305,9 +465,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
@@ -325,9 +485,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
   half,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
@@ -345,9 +505,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
@@ -365,9 +525,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
   half,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
@@ -385,9 +545,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
@@ -399,3 +559,163 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
+  <vscale x 1 x double>,
+  float,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  float,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    float %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    float %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    float %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    float %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
index 02ab0ad81baf..bc108823ce6f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
@@ -1,16 +1,16 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     i64 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     i64 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +80,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     i64 %2)
@@ -98,19 +98,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +120,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     i64 %2)
@@ -138,19 +138,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +160,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     i64 %2)
@@ -178,19 +178,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +200,17 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f32
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f64_nxv1f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     i64 %2)
@@ -218,19 +218,19 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f64_nxv1f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x float> %2,
@@ -240,17 +240,17 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f32
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f64_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     i64 %2)
@@ -258,19 +258,19 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f64_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x float> %2,
@@ -280,17 +280,17 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f32
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f64_nxv4f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     i64 %2)
@@ -298,19 +298,19 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f64_nxv4f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x float> %2,
@@ -320,17 +320,17 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f32
+; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f64_nxv8f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     i64 %2)
@@ -338,19 +338,19 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f64_nxv8f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x float> %2,
@@ -365,9 +365,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
   half,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.f16(
@@ -385,9 +385,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
@@ -405,9 +405,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
   half,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.f16(
@@ -425,9 +425,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
@@ -445,9 +445,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
   half,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.f16(
@@ -465,9 +465,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
@@ -485,9 +485,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
   half,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.f16(
@@ -505,9 +505,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
@@ -525,9 +525,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
   half,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.f16(
@@ -545,9 +545,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
@@ -565,9 +565,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
   float,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_f32
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_nxv1f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.nxv1f64.f32(
@@ -585,9 +585,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_f32
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_nxv1f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
@@ -605,9 +605,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
   float,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_f32
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_nxv2f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.nxv2f64.f32(
@@ -625,9 +625,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_f32
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_nxv2f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
@@ -645,9 +645,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
   float,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_f32
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_nxv4f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.nxv4f64.f32(
@@ -665,9 +665,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_f32
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_nxv4f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
@@ -685,9 +685,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
   float,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_f32
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_nxv8f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.nxv8f64.f32(
@@ -705,9 +705,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_f32
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_nxv8f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
index b4d3d4086811..f5f0dae103d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv32.ll
@@ -1,16 +1,16 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +80,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -98,19 +98,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +120,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -138,19 +138,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +160,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -178,19 +178,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +200,177 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16.f16(
+declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
     i32 %2)
@@ -218,19 +378,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     half %2,
@@ -240,17 +400,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
     i32 %2)
@@ -258,19 +418,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     half %2,
@@ -280,17 +440,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
     i32 %2)
@@ -298,19 +458,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     half %2,
@@ -320,17 +480,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
     i32 %2)
@@ -338,19 +498,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     half %2,
@@ -360,17 +520,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
     i32 %2)
@@ -378,19 +538,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     half %2,
@@ -399,3 +559,163 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    float %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    float %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    float %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    float %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
index f84739475ee6..81d432b8f399 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-rv64.ll
@@ -1,16 +1,16 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i64 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i64 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +80,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i64 %2)
@@ -98,19 +98,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +120,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i64 %2)
@@ -138,19 +138,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +160,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i64 %2)
@@ -178,19 +178,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +200,17 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1f32
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f64_nxv1f32_nxv1f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i64 %2)
@@ -218,19 +218,19 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f32
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f64_nxv1f32_nxv1f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
@@ -240,17 +240,17 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2f32
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f64_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i64 %2)
@@ -258,19 +258,19 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2f32
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f64_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -280,17 +280,17 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4f32
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f64_nxv4f32_nxv4f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i64 %2)
@@ -298,19 +298,19 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4f32
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f64_nxv4f32_nxv4f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
@@ -320,17 +320,17 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8f32
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i64 %2)
@@ -338,19 +338,19 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8f32
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
@@ -360,17 +360,17 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16.f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
     i64 %2)
@@ -378,19 +378,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     half %2,
@@ -400,17 +400,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
     i64 %2)
@@ -418,19 +418,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     half %2,
@@ -440,17 +440,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
     i64 %2)
@@ -458,19 +458,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwmul.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     half %2,
@@ -480,17 +480,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
     i64 %2)
@@ -498,19 +498,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     half %2,
@@ -520,17 +520,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
     i64 %2)
@@ -538,19 +538,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     half %2,
@@ -560,17 +560,17 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f32.f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f64_nxv1f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f32.f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
     i64 %2)
@@ -578,19 +578,19 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32.f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f64_nxv1f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f32.f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwmul.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     float %2,
@@ -600,17 +600,17 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f32.f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f64_nxv2f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f32.f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
     i64 %2)
@@ -618,19 +618,19 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32.f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f64_nxv2f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f32.f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwmul.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     float %2,
@@ -640,17 +640,17 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f32.f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f64_nxv4f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f32.f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
     i64 %2)
@@ -658,19 +658,19 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32.f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f64_nxv4f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f32.f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwmul.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     float %2,
@@ -680,17 +680,17 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f32.f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f64_nxv8f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f32.f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
     i64 %2)
@@ -698,19 +698,19 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32.f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f64_nxv8f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f32.f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwmul.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     float %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
index 91b86cf4058b..2ff4a0e85bcb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.nxv32i1(
+declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfwredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.nxv32i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
index 1a9b1cde8226..9ba4bf1f5636 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredosum-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.nxv32i1(
+declare <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfwredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.nxv32i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv16i1(
+declare <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32(
   <vscale x 1 x double>,
   <vscale x 16 x float>,
   <vscale x 1 x double>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vfwredosum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.nxv16i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
index dcc16b1b07ea..0ee095224c94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.nxv32i1(
+declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfwredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.nxv32i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
index ce2eb047c1b0..48041740443d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwredsum-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.nxv32i1(
+declare <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
   <vscale x 2 x float>,
   <vscale x 32 x half>,
   <vscale x 2 x float>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vfwredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16.nxv32i1(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwredsum.mask.nxv2f32.nxv32f16(
     <vscale x 2 x float> %0,
     <vscale x 32 x half> %1,
     <vscale x 2 x float> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv16i1(
+declare <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
   <vscale x 1 x double>,
   <vscale x 16 x float>,
   <vscale x 1 x double>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vfwredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32.nxv16i1(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwredsum.mask.nxv1f64.nxv16f32(
     <vscale x 1 x double> %0,
     <vscale x 16 x float> %1,
     <vscale x 1 x double> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
index e54016962967..40af29b0e981 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll
@@ -1,16 +1,16 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +80,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -98,19 +98,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +120,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -138,19 +138,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +160,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -178,19 +178,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +200,177 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16.f16(
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
     i32 %2)
@@ -218,19 +378,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     half %2,
@@ -240,17 +400,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
     i32 %2)
@@ -258,19 +418,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     half %2,
@@ -280,17 +440,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
     i32 %2)
@@ -298,19 +458,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     half %2,
@@ -320,17 +480,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
     i32 %2)
@@ -338,19 +498,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     half %2,
@@ -360,17 +520,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
     i32 %2)
@@ -378,19 +538,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     half %2,
@@ -399,3 +559,163 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
+  <vscale x 1 x float>,
+  float,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
+    <vscale x 1 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  float,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    float %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
+  <vscale x 2 x float>,
+  float,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
+    <vscale x 2 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  float,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    float %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
+  <vscale x 4 x float>,
+  float,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
+    <vscale x 4 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  float,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    float %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
+  <vscale x 8 x float>,
+  float,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
+    <vscale x 8 x float> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  float,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    float %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
index 9226f3216532..a3c0f7ca71b0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll
@@ -1,16 +1,16 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     i64 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f16_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     i64 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f16_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +80,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     i64 %2)
@@ -98,19 +98,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f16_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +120,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     i64 %2)
@@ -138,19 +138,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f16_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +160,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     i64 %2)
@@ -178,19 +178,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16f16_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +200,17 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwsub_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f32
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f64_nxv1f32_nxv1f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     i64 %2)
@@ -218,19 +218,19 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f64_nxv1f32_nxv1f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
@@ -240,17 +240,17 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwsub_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f32
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f64_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     i64 %2)
@@ -258,19 +258,19 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f64_nxv2f32_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
@@ -280,17 +280,17 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwsub_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f32
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f64_nxv4f32_nxv4f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     i64 %2)
@@ -298,19 +298,19 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f64_nxv4f32_nxv4f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
@@ -320,17 +320,17 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwsub_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f32
+; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     i64 %2)
@@ -338,19 +338,19 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32
+; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f64_nxv8f32_nxv8f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
@@ -360,17 +360,17 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16.f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
   <vscale x 1 x half>,
   half,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1f16.f16(
     <vscale x 1 x half> %0,
     half %1,
     i64 %2)
@@ -378,19 +378,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   half,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f16.f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.mask.nxv1f32.nxv1f16.f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     half %2,
@@ -400,17 +400,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
   <vscale x 2 x half>,
   half,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2f16.f16(
     <vscale x 2 x half> %0,
     half %1,
     i64 %2)
@@ -418,19 +418,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   half,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f16.f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.mask.nxv2f32.nxv2f16.f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     half %2,
@@ -440,17 +440,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
   <vscale x 4 x half>,
   half,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.nxv4f32.nxv4f16.f16(
     <vscale x 4 x half> %0,
     half %1,
     i64 %2)
@@ -458,19 +458,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   half,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f16.f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.mask.nxv4f32.nxv4f16.f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     half %2,
@@ -480,17 +480,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
   <vscale x 8 x half>,
   half,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.nxv8f32.nxv8f16.f16(
     <vscale x 8 x half> %0,
     half %1,
     i64 %2)
@@ -498,19 +498,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   half,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f16.f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.mask.nxv8f32.nxv8f16.f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     half %2,
@@ -520,17 +520,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
   <vscale x 16 x half>,
   half,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.nxv16f32.nxv16f16.f16(
     <vscale x 16 x half> %0,
     half %1,
     i64 %2)
@@ -538,19 +538,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   half,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16f16_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f16.f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.mask.nxv16f32.nxv16f16.f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     half %2,
@@ -560,17 +560,17 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f32.f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
   <vscale x 1 x float>,
   float,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwsub_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f64_nxv1f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f32.f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.nxv1f64.nxv1f32.f32(
     <vscale x 1 x float> %0,
     float %1,
     i64 %2)
@@ -578,19 +578,19 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32.f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   float,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwsub_mask_vf_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_f32
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f64_nxv1f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f32.f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.mask.nxv1f64.nxv1f32.f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     float %2,
@@ -600,17 +600,17 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f32.f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
   <vscale x 2 x float>,
   float,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwsub_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f64_nxv2f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f32.f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.nxv2f64.nxv2f32.f32(
     <vscale x 2 x float> %0,
     float %1,
     i64 %2)
@@ -618,19 +618,19 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32.f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   float,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwsub_mask_vf_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_f32
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f64_nxv2f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f32.f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.mask.nxv2f64.nxv2f32.f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     float %2,
@@ -640,17 +640,17 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f32.f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
   <vscale x 4 x float>,
   float,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwsub_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f64_nxv4f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f32.f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.nxv4f64.nxv4f32.f32(
     <vscale x 4 x float> %0,
     float %1,
     i64 %2)
@@ -658,19 +658,19 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32.f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   float,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_f32
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f64_nxv4f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f32.f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.mask.nxv4f64.nxv4f32.f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     float %2,
@@ -680,17 +680,17 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f32.f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
   <vscale x 8 x float>,
   float,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f64_nxv8f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f32.f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.nxv8f64.nxv8f32.f32(
     <vscale x 8 x float> %0,
     float %1,
     i64 %2)
@@ -698,19 +698,19 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32.f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   float,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_f32
+; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f64_nxv8f32_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f32.f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.mask.nxv8f64.nxv8f32.f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     float %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
index d50d1687a352..5c770e2a6ad7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
@@ -1,16 +1,16 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     i32 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     i32 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +80,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     i32 %2)
@@ -98,19 +98,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +120,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     i32 %2)
@@ -138,19 +138,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +160,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     i32 %2)
@@ -178,19 +178,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x half> %2,
@@ -200,14 +200,174 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x float> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x float> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x float> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x float> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
 declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
   <vscale x 1 x float>,
   half,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
@@ -225,9 +385,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
   <vscale x 1 x i1>,
   i32);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
@@ -245,9 +405,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
   half,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
@@ -265,9 +425,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
   <vscale x 2 x i1>,
   i32);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
@@ -285,9 +445,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
   half,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
@@ -305,9 +465,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
   <vscale x 4 x i1>,
   i32);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
@@ -325,9 +485,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
   half,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
@@ -345,9 +505,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
   <vscale x 8 x i1>,
   i32);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
@@ -365,9 +525,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
   half,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
@@ -385,9 +545,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
   <vscale x 16 x i1>,
   i32);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
@@ -399,3 +559,163 @@ entry:
 
   ret <vscale x 16 x float> %a
 }
+
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
+  <vscale x 1 x double>,
+  float,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  float,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    float %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
+  <vscale x 2 x double>,
+  float,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  float,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    float %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
+  <vscale x 4 x double>,
+  float,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  float,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    float %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
+  <vscale x 8 x double>,
+  float,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    float %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>,
+  float,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    float %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
index 43245ff0a18b..ed9f4bfd7b4a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
@@ -1,16 +1,16 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x half> %1,
     i64 %2)
@@ -18,19 +18,19 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
+declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f16(
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x half> %2,
@@ -40,17 +40,17 @@ entry:
   ret <vscale x 1 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x half> %1,
     i64 %2)
@@ -58,19 +58,19 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
+declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f16(
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x half> %2,
@@ -80,17 +80,17 @@ entry:
   ret <vscale x 2 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x half> %1,
     i64 %2)
@@ -98,19 +98,19 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
+declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f16(
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x half> %2,
@@ -120,17 +120,17 @@ entry:
   ret <vscale x 4 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x half> %1,
     i64 %2)
@@ -138,19 +138,19 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
+declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f16(
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x half> %2,
@@ -160,17 +160,17 @@ entry:
   ret <vscale x 8 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x half> %1,
     i64 %2)
@@ -178,19 +178,19 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
+declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f16(
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x half> %2,
@@ -200,17 +200,17 @@ entry:
   ret <vscale x 16 x float> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f32
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f64_nxv1f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x float> %1,
     i64 %2)
@@ -218,19 +218,19 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f32(
+declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f64_nxv1f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f32(
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x float> %2,
@@ -240,17 +240,17 @@ entry:
   ret <vscale x 1 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f32
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f64_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x float> %1,
     i64 %2)
@@ -258,19 +258,19 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f32(
+declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f64_nxv2f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f32(
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x float> %2,
@@ -280,17 +280,17 @@ entry:
   ret <vscale x 2 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f32
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f64_nxv4f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x float> %1,
     i64 %2)
@@ -298,19 +298,19 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f32(
+declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f64_nxv4f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f32(
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x float> %2,
@@ -320,17 +320,17 @@ entry:
   ret <vscale x 4 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f32
+; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f64_nxv8f64_nxv8f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x float> %1,
     i64 %2)
@@ -338,19 +338,19 @@ entry:
   ret <vscale x 8 x double> %a
 }
 
-declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f32(
+declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f64_nxv8f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f32(
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x float> %2,
@@ -365,9 +365,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
   half,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.f16(
@@ -385,9 +385,9 @@ declare <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
@@ -405,9 +405,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
   half,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.nxv2f32.f16(
@@ -425,9 +425,9 @@ declare <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
@@ -445,9 +445,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
   half,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.nxv4f32.f16(
@@ -465,9 +465,9 @@ declare <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
@@ -485,9 +485,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
   half,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.nxv8f32.f16(
@@ -505,9 +505,9 @@ declare <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
@@ -525,9 +525,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
   half,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.nxv16f32.f16(
@@ -545,9 +545,9 @@ declare <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
   <vscale x 16 x i1>,
   i64);
 
-define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_f16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
@@ -565,9 +565,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
   float,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_f32
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_nxv1f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
@@ -585,9 +585,9 @@ declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
   <vscale x 1 x i1>,
   i64);
 
-define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_f32
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_nxv1f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
@@ -605,9 +605,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
   float,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_f32
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_nxv2f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.nxv2f64.f32(
@@ -625,9 +625,9 @@ declare <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
   <vscale x 2 x i1>,
   i64);
 
-define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, <vscale x 2 x double> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_f32
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_nxv2f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
@@ -645,9 +645,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
   float,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_f32
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_nxv4f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.nxv4f64.f32(
@@ -665,9 +665,9 @@ declare <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
   <vscale x 4 x i1>,
   i64);
 
-define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, <vscale x 4 x double> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_f32
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_nxv4f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
@@ -685,9 +685,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
   float,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, i64 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_f32
+; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_nxv8f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.nxv8f64.f32(
@@ -705,9 +705,9 @@ declare <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
   <vscale x 8 x i1>,
   i64);
 
-define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, <vscale x 8 x double> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_f32
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_nxv8f64_f32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
 ; CHECK:       vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
index 63218bfebb65..761470d6a7e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -10,7 +10,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -18,7 +18,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i32);
@@ -28,7 +28,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 %2)
@@ -36,7 +36,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i32);
@@ -46,7 +46,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 %2)
@@ -54,7 +54,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i32);
@@ -64,7 +64,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 %2)
@@ -72,7 +72,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i32);
@@ -82,7 +82,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 %2)
@@ -90,7 +90,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i32);
@@ -100,7 +100,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 %2)
@@ -108,7 +108,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.nxv64i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i32);
@@ -118,7 +118,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.nxv64i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i32 %2)
@@ -126,7 +126,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i32);
@@ -136,7 +136,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i32 %2)
@@ -144,7 +144,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i32);
@@ -154,7 +154,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i32 %2)
@@ -162,7 +162,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i32);
@@ -172,7 +172,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i32 %2)
@@ -180,7 +180,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i32);
@@ -190,7 +190,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i32 %2)
@@ -198,7 +198,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i32);
@@ -208,7 +208,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i32 %2)
@@ -216,7 +216,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i32);
@@ -226,7 +226,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i32 %2)
@@ -234,7 +234,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32);
@@ -244,7 +244,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2)
@@ -252,7 +252,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32);
@@ -262,7 +262,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2)
@@ -270,7 +270,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32);
@@ -280,7 +280,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2)
@@ -288,7 +288,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32);
@@ -298,7 +298,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2)
@@ -306,7 +306,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32);
@@ -316,7 +316,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 %2)
@@ -324,7 +324,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   i32);
@@ -334,7 +334,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
@@ -342,7 +342,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   i32);
@@ -352,7 +352,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
     i32 %2)
@@ -360,7 +360,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   i32);
@@ -370,7 +370,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
     i32 %2)
@@ -378,7 +378,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   i32);
@@ -388,7 +388,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
     i32 %2)
@@ -396,7 +396,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   i32);
@@ -406,7 +406,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
     i32 %2)
@@ -414,7 +414,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   i32);
@@ -424,7 +424,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
     i32 %2)
@@ -432,7 +432,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   i32);
@@ -442,7 +442,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 %1,
     i32 %2)
@@ -450,7 +450,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   i32);
@@ -460,7 +460,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
     i32 %2)
@@ -468,7 +468,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   i32);
@@ -478,7 +478,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
     i32 %2)
@@ -486,7 +486,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   i32);
@@ -496,7 +496,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
     i32 %2)
@@ -504,7 +504,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   i32);
@@ -514,7 +514,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
     i32 %2)
@@ -522,7 +522,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   i32);
@@ -532,7 +532,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
     i32 %2)
@@ -540,7 +540,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   i32);
@@ -550,7 +550,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 %1,
     i32 %2)
@@ -558,7 +558,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   i32);
@@ -568,7 +568,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     i32 %2)
@@ -576,7 +576,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   i32);
@@ -586,7 +586,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     i32 %2)
@@ -594,7 +594,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   i32);
@@ -604,7 +604,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     i32 %2)
@@ -612,7 +612,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   i32);
@@ -622,7 +622,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     i32 %2)
@@ -630,7 +630,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   i32);
@@ -640,7 +640,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     i32 %2)
@@ -653,7 +653,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
     i32 %1)
@@ -666,7 +666,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 -9,
     i32 %1)
@@ -679,7 +679,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
     i32 %1)
@@ -692,7 +692,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 -9,
     i32 %1)
@@ -705,7 +705,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
     i32 %1)
@@ -718,7 +718,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 -9,
     i32 %1)
@@ -731,7 +731,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 9,
     i32 %1)
@@ -744,7 +744,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 -9,
     i32 %1)
@@ -757,7 +757,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
     i32 %1)
@@ -770,7 +770,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 -9,
     i32 %1)
@@ -783,7 +783,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
     i32 %1)
@@ -796,7 +796,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 -9,
     i32 %1)
@@ -809,7 +809,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 9,
     i32 %1)
@@ -822,7 +822,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 -9,
     i32 %1)
@@ -835,7 +835,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
     i32 %1)
@@ -848,7 +848,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 -9,
     i32 %1)
@@ -861,7 +861,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
     i32 %1)
@@ -874,7 +874,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 -9,
     i32 %1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
index 1b6c8eb93ea3..71958e41b06d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -10,7 +10,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -18,7 +18,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i64);
@@ -28,7 +28,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i64 %2)
@@ -36,7 +36,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i64);
@@ -46,7 +46,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i64 %2)
@@ -54,7 +54,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i64);
@@ -64,7 +64,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i64 %2)
@@ -72,7 +72,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i64);
@@ -82,7 +82,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i64 %2)
@@ -90,7 +90,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i64);
@@ -100,7 +100,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i64 %2)
@@ -108,7 +108,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.nxv64i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i64);
@@ -118,7 +118,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.nxv64i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i64 %2)
@@ -126,7 +126,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i64);
@@ -136,7 +136,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i64 %2)
@@ -144,7 +144,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i64);
@@ -154,7 +154,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i64 %2)
@@ -162,7 +162,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i64);
@@ -172,7 +172,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i64 %2)
@@ -180,7 +180,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i64);
@@ -190,7 +190,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i64 %2)
@@ -198,7 +198,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i64);
@@ -208,7 +208,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i64 %2)
@@ -216,7 +216,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i64);
@@ -226,7 +226,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i64 %2)
@@ -234,7 +234,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i64);
@@ -244,7 +244,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i64 %2)
@@ -252,7 +252,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i64);
@@ -262,7 +262,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i64 %2)
@@ -270,7 +270,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i64);
@@ -280,7 +280,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i64 %2)
@@ -288,7 +288,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i64);
@@ -298,7 +298,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i64 %2)
@@ -306,7 +306,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i64);
@@ -316,7 +316,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i64 %2)
@@ -324,7 +324,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i64(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64);
@@ -334,7 +334,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i64(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2)
@@ -342,7 +342,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i64(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64);
@@ -352,7 +352,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i64(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2)
@@ -360,7 +360,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i64(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64);
@@ -370,7 +370,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i64(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2)
@@ -378,7 +378,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i64(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64);
@@ -388,7 +388,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i64(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2)
@@ -396,7 +396,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   i64);
@@ -406,7 +406,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
     i64 %2)
@@ -414,7 +414,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   i64);
@@ -424,7 +424,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
     i64 %2)
@@ -432,7 +432,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   i64);
@@ -442,7 +442,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
     i64 %2)
@@ -450,7 +450,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   i64);
@@ -460,7 +460,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
     i64 %2)
@@ -468,7 +468,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   i64);
@@ -478,7 +478,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
     i64 %2)
@@ -486,7 +486,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   i64);
@@ -496,7 +496,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
     i64 %2)
@@ -504,7 +504,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   i64);
@@ -514,7 +514,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 %1,
     i64 %2)
@@ -522,7 +522,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   i64);
@@ -532,7 +532,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
     i64 %2)
@@ -540,7 +540,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   i64);
@@ -550,7 +550,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
     i64 %2)
@@ -558,7 +558,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   i64);
@@ -568,7 +568,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
     i64 %2)
@@ -576,7 +576,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   i64);
@@ -586,7 +586,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
     i64 %2)
@@ -594,7 +594,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   i64);
@@ -604,7 +604,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
     i64 %2)
@@ -612,7 +612,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   i64);
@@ -622,7 +622,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 %1,
     i64 %2)
@@ -630,7 +630,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   i64);
@@ -640,7 +640,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     i64 %2)
@@ -648,7 +648,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   i64);
@@ -658,7 +658,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     i64 %2)
@@ -666,7 +666,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   i64);
@@ -676,7 +676,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     i64 %2)
@@ -684,7 +684,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   i64);
@@ -694,7 +694,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     i64 %2)
@@ -702,7 +702,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   i64);
@@ -712,7 +712,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     i64 %2)
@@ -720,7 +720,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i64(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   i64);
@@ -730,7 +730,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i64(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     i64 %2)
@@ -738,7 +738,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i64(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   i64);
@@ -748,7 +748,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i64(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     i64 %2)
@@ -756,7 +756,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i64(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   i64);
@@ -766,7 +766,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i64(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     i64 %2)
@@ -774,7 +774,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i64(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   i64);
@@ -784,7 +784,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i64(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     i64 %2)
@@ -797,7 +797,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
     i64 %1)
@@ -810,7 +810,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 -9,
     i64 %1)
@@ -823,7 +823,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
     i64 %1)
@@ -836,7 +836,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 -9,
     i64 %1)
@@ -849,7 +849,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
     i64 %1)
@@ -862,7 +862,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 -9,
     i64 %1)
@@ -875,7 +875,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 9,
     i64 %1)
@@ -888,7 +888,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 -9,
     i64 %1)
@@ -901,7 +901,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
     i64 %1)
@@ -914,7 +914,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 -9,
     i64 %1)
@@ -927,7 +927,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
     i64 %1)
@@ -940,7 +940,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 -9,
     i64 %1)
@@ -953,7 +953,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 9,
     i64 %1)
@@ -966,7 +966,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 -9,
     i64 %1)
@@ -979,7 +979,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
     i64 %1)
@@ -992,7 +992,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 -9,
     i64 %1)
@@ -1005,7 +1005,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
     i64 %1)
@@ -1018,7 +1018,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 -9,
     i64 %1)
@@ -1031,7 +1031,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i64(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
     i64 %1)
@@ -1044,7 +1044,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i64(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 -9,
     i64 %1)
@@ -1057,7 +1057,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i64(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
     i64 %1)
@@ -1070,7 +1070,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i64(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 -9,
     i64 %1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
index 3dbdf047b7e7..a59cfbf669f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
@@ -31,7 +31,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
@@ -40,7 +40,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
@@ -51,7 +51,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
@@ -60,7 +60,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
@@ -71,7 +71,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
@@ -80,7 +80,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
@@ -91,7 +91,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
@@ -100,7 +100,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
@@ -111,7 +111,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
@@ -120,7 +120,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
@@ -131,7 +131,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
@@ -140,7 +140,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
@@ -151,7 +151,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
@@ -160,7 +160,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
@@ -171,7 +171,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
@@ -180,7 +180,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
@@ -191,7 +191,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
@@ -200,7 +200,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
@@ -211,7 +211,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
@@ -220,7 +220,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
@@ -231,7 +231,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
@@ -240,7 +240,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
@@ -251,7 +251,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
@@ -260,7 +260,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
@@ -271,7 +271,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
@@ -280,7 +280,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
@@ -291,7 +291,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
@@ -300,7 +300,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
@@ -311,7 +311,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
@@ -320,7 +320,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
@@ -331,7 +331,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
@@ -340,7 +340,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
@@ -351,7 +351,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
@@ -360,7 +360,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
@@ -371,7 +371,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
@@ -380,7 +380,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
@@ -391,7 +391,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
@@ -400,7 +400,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
@@ -411,7 +411,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
@@ -420,7 +420,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
@@ -431,7 +431,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
@@ -451,7 +451,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
@@ -460,7 +460,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
@@ -471,7 +471,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
@@ -480,7 +480,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
@@ -491,7 +491,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 %1,
     <vscale x 64 x i1> %2,
@@ -500,7 +500,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
@@ -511,7 +511,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
@@ -520,7 +520,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
@@ -531,7 +531,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
@@ -540,7 +540,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
@@ -551,7 +551,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
@@ -560,7 +560,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
@@ -571,7 +571,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
@@ -580,7 +580,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
@@ -591,7 +591,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
@@ -600,7 +600,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
@@ -611,7 +611,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 %1,
     <vscale x 32 x i1> %2,
@@ -620,7 +620,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
@@ -631,7 +631,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
@@ -640,7 +640,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
@@ -651,7 +651,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
@@ -660,7 +660,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
@@ -671,7 +671,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
@@ -680,7 +680,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
@@ -691,7 +691,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
@@ -700,7 +700,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
@@ -711,7 +711,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     <vscale x 16 x i1> %2,
@@ -725,7 +725,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
     <vscale x 1 x i1> %1,
@@ -738,10 +738,10 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
-    i8 -9,
+    i8 9,
     <vscale x 2 x i1> %1,
     i32 %2)
 
@@ -753,7 +753,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
     <vscale x 4 x i1> %1,
@@ -766,10 +766,10 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
-    i8 -9,
+    i8 9,
     <vscale x 8 x i1> %1,
     i32 %2)
 
@@ -781,7 +781,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
     <vscale x 16 x i1> %1,
@@ -794,10 +794,10 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vsca
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
-    i8 -9,
+    i8 9,
     <vscale x 32 x i1> %1,
     i32 %2)
 
@@ -809,7 +809,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 9,
     <vscale x 64 x i1> %1,
@@ -822,10 +822,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
-    i16 -9,
+    i16 9,
     <vscale x 1 x i1> %1,
     i32 %2)
 
@@ -837,7 +837,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
     <vscale x 2 x i1> %1,
@@ -850,10 +850,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
-    i16 -9,
+    i16 9,
     <vscale x 4 x i1> %1,
     i32 %2)
 
@@ -865,7 +865,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
     <vscale x 8 x i1> %1,
@@ -878,10 +878,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vs
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
-    i16 -9,
+    i16 9,
     <vscale x 16 x i1> %1,
     i32 %2)
 
@@ -893,7 +893,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 9,
     <vscale x 32 x i1> %1,
@@ -906,10 +906,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
-    i32 -9,
+    i32 9,
     <vscale x 1 x i1> %1,
     i32 %2)
 
@@ -921,7 +921,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
     <vscale x 2 x i1> %1,
@@ -934,10 +934,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
-    i32 -9,
+    i32 9,
     <vscale x 4 x i1> %1,
     i32 %2)
 
@@ -949,7 +949,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
     <vscale x 8 x i1> %1,
@@ -962,10 +962,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vs
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
-    i32 -9,
+    i32 9,
     <vscale x 16 x i1> %1,
     i32 %2)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
index b94a2b9f4d98..7ba10e48ea49 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
@@ -31,7 +31,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
@@ -40,7 +40,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
@@ -51,7 +51,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
@@ -60,7 +60,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
@@ -71,7 +71,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
@@ -80,7 +80,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
@@ -91,7 +91,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
@@ -100,7 +100,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
@@ -111,7 +111,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
@@ -120,7 +120,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
@@ -131,7 +131,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
@@ -140,7 +140,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
@@ -151,7 +151,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
@@ -160,7 +160,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
@@ -171,7 +171,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
@@ -180,7 +180,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
@@ -191,7 +191,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
@@ -200,7 +200,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
@@ -211,7 +211,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
@@ -220,7 +220,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
@@ -231,7 +231,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
@@ -240,7 +240,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
@@ -251,7 +251,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
@@ -260,7 +260,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
@@ -271,7 +271,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
@@ -280,7 +280,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
@@ -291,7 +291,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
@@ -300,7 +300,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
@@ -311,7 +311,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
@@ -320,7 +320,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
@@ -331,7 +331,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
@@ -340,7 +340,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
@@ -351,7 +351,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
@@ -360,7 +360,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i64(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
@@ -371,7 +371,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i64(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
@@ -380,7 +380,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i64(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
@@ -391,7 +391,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i64(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
@@ -400,7 +400,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i64(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
@@ -411,7 +411,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i64(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
@@ -420,7 +420,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i64(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
@@ -431,7 +431,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i64(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
@@ -451,7 +451,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
@@ -460,7 +460,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
@@ -471,7 +471,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
@@ -480,7 +480,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
@@ -491,7 +491,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
@@ -500,7 +500,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
@@ -511,7 +511,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
@@ -520,7 +520,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
@@ -531,7 +531,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
@@ -540,7 +540,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
@@ -551,7 +551,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
@@ -560,7 +560,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
@@ -571,7 +571,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 %1,
     <vscale x 64 x i1> %2,
@@ -580,7 +580,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
@@ -591,7 +591,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
@@ -600,7 +600,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
@@ -611,7 +611,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
@@ -620,7 +620,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
@@ -631,7 +631,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
@@ -640,7 +640,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
@@ -651,7 +651,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
@@ -660,7 +660,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
@@ -671,7 +671,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
@@ -680,7 +680,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
@@ -691,7 +691,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 %1,
     <vscale x 32 x i1> %2,
@@ -700,7 +700,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
@@ -711,7 +711,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
@@ -720,7 +720,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
@@ -731,7 +731,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
@@ -740,7 +740,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
@@ -751,7 +751,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
@@ -760,7 +760,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
@@ -771,7 +771,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
@@ -780,7 +780,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
@@ -791,7 +791,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     <vscale x 16 x i1> %2,
@@ -800,7 +800,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i64(
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
@@ -811,7 +811,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i64(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i1> %2,
@@ -820,7 +820,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i64(
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
@@ -831,7 +831,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i64(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i1> %2,
@@ -840,7 +840,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i64(
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
@@ -851,7 +851,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i64(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i1> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i64(
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
@@ -871,7 +871,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i64(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     <vscale x 8 x i1> %2,
@@ -885,7 +885,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 9,
     <vscale x 1 x i1> %1,
@@ -899,7 +899,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 9,
     <vscale x 2 x i1> %1,
@@ -913,7 +913,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 9,
     <vscale x 4 x i1> %1,
@@ -927,7 +927,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 9,
     <vscale x 8 x i1> %1,
@@ -941,7 +941,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 9,
     <vscale x 16 x i1> %1,
@@ -955,7 +955,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 9,
     <vscale x 32 x i1> %1,
@@ -969,7 +969,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.carry.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 9,
     <vscale x 64 x i1> %1,
@@ -983,7 +983,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 9,
     <vscale x 1 x i1> %1,
@@ -997,7 +997,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 9,
     <vscale x 2 x i1> %1,
@@ -1011,7 +1011,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 9,
     <vscale x 4 x i1> %1,
@@ -1025,7 +1025,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 9,
     <vscale x 8 x i1> %1,
@@ -1039,7 +1039,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 9,
     <vscale x 16 x i1> %1,
@@ -1053,7 +1053,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 9,
     <vscale x 32 x i1> %1,
@@ -1067,7 +1067,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 9,
     <vscale x 1 x i1> %1,
@@ -1081,7 +1081,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 9,
     <vscale x 2 x i1> %1,
@@ -1095,7 +1095,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 9,
     <vscale x 4 x i1> %1,
@@ -1109,7 +1109,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 9,
     <vscale x 8 x i1> %1,
@@ -1123,7 +1123,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 9,
     <vscale x 16 x i1> %1,
@@ -1137,7 +1137,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i64(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 9,
     <vscale x 1 x i1> %1,
@@ -1151,7 +1151,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i64(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 9,
     <vscale x 2 x i1> %1,
@@ -1165,7 +1165,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i64(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 9,
     <vscale x 4 x i1> %1,
@@ -1179,7 +1179,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i64(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 9,
     <vscale x 8 x i1> %1,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
index 4e8eabcf7ea3..213c69445fab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i32);
@@ -10,7 +10,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i32 %2)
@@ -18,7 +18,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i32);
@@ -28,7 +28,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i32 %2)
@@ -36,7 +36,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i32);
@@ -46,7 +46,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i32 %2)
@@ -54,7 +54,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i32);
@@ -64,7 +64,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i32 %2)
@@ -72,7 +72,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i32);
@@ -82,7 +82,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i32 %2)
@@ -90,7 +90,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i32);
@@ -100,7 +100,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i32 %2)
@@ -108,7 +108,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.nxv64i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i32);
@@ -118,7 +118,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.nxv64i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i32 %2)
@@ -126,7 +126,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i32);
@@ -136,7 +136,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i32 %2)
@@ -144,7 +144,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i32);
@@ -154,7 +154,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i32 %2)
@@ -162,7 +162,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i32);
@@ -172,7 +172,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i32 %2)
@@ -180,7 +180,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i32);
@@ -190,7 +190,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i32 %2)
@@ -198,7 +198,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i32);
@@ -208,7 +208,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i32 %2)
@@ -216,7 +216,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i32);
@@ -226,7 +226,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i32 %2)
@@ -234,7 +234,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i32);
@@ -244,7 +244,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i32 %2)
@@ -252,7 +252,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i32);
@@ -262,7 +262,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i32 %2)
@@ -270,7 +270,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i32);
@@ -280,7 +280,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i32 %2)
@@ -288,7 +288,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i32);
@@ -298,7 +298,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i32 %2)
@@ -306,7 +306,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i32);
@@ -316,7 +316,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i32 %2)
@@ -324,7 +324,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   i32);
@@ -334,7 +334,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
     i32 %2)
@@ -342,7 +342,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   i32);
@@ -352,7 +352,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
     i32 %2)
@@ -360,7 +360,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   i32);
@@ -370,7 +370,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
     i32 %2)
@@ -378,7 +378,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   i32);
@@ -388,7 +388,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
     i32 %2)
@@ -396,7 +396,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   i32);
@@ -406,7 +406,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
     i32 %2)
@@ -414,7 +414,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   i32);
@@ -424,7 +424,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
     i32 %2)
@@ -432,7 +432,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   i32);
@@ -442,7 +442,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 %1,
     i32 %2)
@@ -450,7 +450,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   i32);
@@ -460,7 +460,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
     i32 %2)
@@ -468,7 +468,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   i32);
@@ -478,7 +478,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
     i32 %2)
@@ -486,7 +486,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   i32);
@@ -496,7 +496,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
     i32 %2)
@@ -504,7 +504,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   i32);
@@ -514,7 +514,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
     i32 %2)
@@ -522,7 +522,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   i32);
@@ -532,7 +532,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
     i32 %2)
@@ -540,7 +540,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   i32);
@@ -550,7 +550,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 %1,
     i32 %2)
@@ -558,7 +558,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   i32);
@@ -568,7 +568,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     i32 %2)
@@ -576,7 +576,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   i32);
@@ -586,7 +586,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     i32 %2)
@@ -594,7 +594,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   i32);
@@ -604,7 +604,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     i32 %2)
@@ -612,7 +612,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   i32);
@@ -622,7 +622,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     i32 %2)
@@ -630,7 +630,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   i32);
@@ -640,7 +640,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     i32 %2)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
index b28aaaef3b02..7f907923ff6f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   i64);
@@ -10,7 +10,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     i64 %2)
@@ -18,7 +18,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   i64);
@@ -28,7 +28,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     i64 %2)
@@ -36,7 +36,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   i64);
@@ -46,7 +46,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     i64 %2)
@@ -54,7 +54,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   i64);
@@ -64,7 +64,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     i64 %2)
@@ -72,7 +72,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   i64);
@@ -82,7 +82,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     i64 %2)
@@ -90,7 +90,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   i64);
@@ -100,7 +100,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     i64 %2)
@@ -108,7 +108,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.nxv64i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   i64);
@@ -118,7 +118,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.nxv64i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     i64 %2)
@@ -126,7 +126,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   i64);
@@ -136,7 +136,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     i64 %2)
@@ -144,7 +144,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   i64);
@@ -154,7 +154,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     i64 %2)
@@ -162,7 +162,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   i64);
@@ -172,7 +172,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     i64 %2)
@@ -180,7 +180,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   i64);
@@ -190,7 +190,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     i64 %2)
@@ -198,7 +198,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   i64);
@@ -208,7 +208,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     i64 %2)
@@ -216,7 +216,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   i64);
@@ -226,7 +226,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.nxv32i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     i64 %2)
@@ -234,7 +234,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   i64);
@@ -244,7 +244,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     i64 %2)
@@ -252,7 +252,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   i64);
@@ -262,7 +262,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     i64 %2)
@@ -270,7 +270,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   i64);
@@ -280,7 +280,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     i64 %2)
@@ -288,7 +288,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   i64);
@@ -298,7 +298,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     i64 %2)
@@ -306,7 +306,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   i64);
@@ -316,7 +316,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     i64 %2)
@@ -324,7 +324,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i64(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   i64);
@@ -334,7 +334,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.nxv1i64(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     i64 %2)
@@ -342,7 +342,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i64(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   i64);
@@ -352,7 +352,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.nxv2i64(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     i64 %2)
@@ -360,7 +360,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i64(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   i64);
@@ -370,7 +370,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.nxv4i64(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     i64 %2)
@@ -378,7 +378,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i64(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   i64);
@@ -388,7 +388,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.nxv8i64(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     i64 %2)
@@ -396,7 +396,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   i64);
@@ -406,7 +406,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
     i64 %2)
@@ -414,7 +414,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   i64);
@@ -424,7 +424,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
     i64 %2)
@@ -432,7 +432,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   i64);
@@ -442,7 +442,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
     i64 %2)
@@ -450,7 +450,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   i64);
@@ -460,7 +460,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
     i64 %2)
@@ -468,7 +468,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   i64);
@@ -478,7 +478,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
     i64 %2)
@@ -486,7 +486,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   i64);
@@ -496,7 +496,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
     i64 %2)
@@ -504,7 +504,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   i64);
@@ -514,7 +514,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 %1,
     i64 %2)
@@ -522,7 +522,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   i64);
@@ -532,7 +532,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
     i64 %2)
@@ -540,7 +540,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   i64);
@@ -550,7 +550,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
     i64 %2)
@@ -558,7 +558,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   i64);
@@ -568,7 +568,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
     i64 %2)
@@ -576,7 +576,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   i64);
@@ -586,7 +586,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
     i64 %2)
@@ -594,7 +594,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   i64);
@@ -604,7 +604,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
     i64 %2)
@@ -612,7 +612,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   i64);
@@ -622,7 +622,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 %1,
     i64 %2)
@@ -630,7 +630,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   i64);
@@ -640,7 +640,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     i64 %2)
@@ -648,7 +648,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   i64);
@@ -658,7 +658,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     i64 %2)
@@ -666,7 +666,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   i64);
@@ -676,7 +676,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     i64 %2)
@@ -684,7 +684,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   i64);
@@ -694,7 +694,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     i64 %2)
@@ -702,7 +702,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   i64);
@@ -712,7 +712,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     i64 %2)
@@ -720,7 +720,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i64(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   i64);
@@ -730,7 +730,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i64(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     i64 %2)
@@ -738,7 +738,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i64(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   i64);
@@ -748,7 +748,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i64(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     i64 %2)
@@ -756,7 +756,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i64(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   i64);
@@ -766,7 +766,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i64(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     i64 %2)
@@ -774,7 +774,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i64(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   i64);
@@ -784,7 +784,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i64(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     i64 %2)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
index 6a8253ffc504..09e8c90efbf2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
@@ -31,7 +31,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
@@ -40,7 +40,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
@@ -51,7 +51,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
@@ -60,7 +60,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
@@ -71,7 +71,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
@@ -80,7 +80,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
@@ -91,7 +91,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
@@ -100,7 +100,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
@@ -111,7 +111,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
@@ -120,7 +120,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
@@ -131,7 +131,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
@@ -140,7 +140,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
@@ -151,7 +151,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
@@ -160,7 +160,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
@@ -171,7 +171,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
@@ -180,7 +180,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
@@ -191,7 +191,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
@@ -200,7 +200,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
@@ -211,7 +211,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
@@ -220,7 +220,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
@@ -231,7 +231,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
@@ -240,7 +240,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
@@ -251,7 +251,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
@@ -260,7 +260,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
@@ -271,7 +271,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
@@ -280,7 +280,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
@@ -291,7 +291,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
@@ -300,7 +300,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
@@ -311,7 +311,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
@@ -320,7 +320,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
@@ -331,7 +331,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
@@ -340,7 +340,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
@@ -351,7 +351,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
@@ -360,7 +360,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
@@ -371,7 +371,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
@@ -380,7 +380,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
@@ -391,7 +391,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
@@ -400,7 +400,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
@@ -411,7 +411,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
@@ -420,7 +420,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
@@ -431,7 +431,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
@@ -451,7 +451,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
@@ -460,7 +460,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
@@ -471,7 +471,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
@@ -480,7 +480,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
@@ -491,7 +491,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 %1,
     <vscale x 64 x i1> %2,
@@ -500,7 +500,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
@@ -511,7 +511,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
@@ -520,7 +520,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
@@ -531,7 +531,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
@@ -540,7 +540,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
@@ -551,7 +551,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
@@ -560,7 +560,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
@@ -571,7 +571,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
@@ -580,7 +580,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
@@ -591,7 +591,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
@@ -600,7 +600,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
@@ -611,7 +611,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 %1,
     <vscale x 32 x i1> %2,
@@ -620,7 +620,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
@@ -631,7 +631,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
@@ -640,7 +640,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
@@ -651,7 +651,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
@@ -660,7 +660,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
@@ -671,7 +671,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
@@ -680,7 +680,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
@@ -691,7 +691,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
@@ -700,7 +700,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
@@ -711,7 +711,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     <vscale x 16 x i1> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
index 5b22e5977ffd..901df3eea8ea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
@@ -11,7 +11,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
@@ -31,7 +31,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
@@ -40,7 +40,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
@@ -51,7 +51,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
@@ -60,7 +60,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
@@ -71,7 +71,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
@@ -80,7 +80,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
@@ -91,7 +91,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
@@ -100,7 +100,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
@@ -111,7 +111,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
@@ -120,7 +120,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
@@ -131,7 +131,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
@@ -140,7 +140,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
@@ -151,7 +151,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
@@ -160,7 +160,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
@@ -171,7 +171,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
@@ -180,7 +180,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
@@ -191,7 +191,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
@@ -200,7 +200,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
@@ -211,7 +211,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
@@ -220,7 +220,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
@@ -231,7 +231,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
@@ -240,7 +240,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
@@ -251,7 +251,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
@@ -260,7 +260,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
@@ -271,7 +271,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
@@ -280,7 +280,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
@@ -291,7 +291,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
@@ -300,7 +300,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
@@ -311,7 +311,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
@@ -320,7 +320,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
@@ -331,7 +331,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
@@ -340,7 +340,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
@@ -351,7 +351,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
@@ -360,7 +360,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i64(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
@@ -371,7 +371,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i64(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
@@ -380,7 +380,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i64(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
@@ -391,7 +391,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i64(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
@@ -400,7 +400,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i64(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
@@ -411,7 +411,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i64(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
@@ -420,7 +420,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i64(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
@@ -431,7 +431,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i64(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
   <vscale x 1 x i1>,
@@ -451,7 +451,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     i8 %1,
     <vscale x 1 x i1> %2,
@@ -460,7 +460,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
   <vscale x 2 x i8>,
   i8,
   <vscale x 2 x i1>,
@@ -471,7 +471,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     i8 %1,
     <vscale x 2 x i1> %2,
@@ -480,7 +480,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
   <vscale x 4 x i8>,
   i8,
   <vscale x 4 x i1>,
@@ -491,7 +491,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     i8 %1,
     <vscale x 4 x i1> %2,
@@ -500,7 +500,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
   <vscale x 8 x i8>,
   i8,
   <vscale x 8 x i1>,
@@ -511,7 +511,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     i8 %1,
     <vscale x 8 x i1> %2,
@@ -520,7 +520,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
   <vscale x 16 x i8>,
   i8,
   <vscale x 16 x i1>,
@@ -531,7 +531,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     i8 %1,
     <vscale x 16 x i1> %2,
@@ -540,7 +540,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
   <vscale x 32 x i8>,
   i8,
   <vscale x 32 x i1>,
@@ -551,7 +551,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     i8 %1,
     <vscale x 32 x i1> %2,
@@ -560,7 +560,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8(
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
   <vscale x 64 x i8>,
   i8,
   <vscale x 64 x i1>,
@@ -571,7 +571,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8(
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     i8 %1,
     <vscale x 64 x i1> %2,
@@ -580,7 +580,7 @@ entry:
   ret <vscale x 64 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
   <vscale x 1 x i16>,
   i16,
   <vscale x 1 x i1>,
@@ -591,7 +591,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     i16 %1,
     <vscale x 1 x i1> %2,
@@ -600,7 +600,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
   <vscale x 2 x i16>,
   i16,
   <vscale x 2 x i1>,
@@ -611,7 +611,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     i16 %1,
     <vscale x 2 x i1> %2,
@@ -620,7 +620,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
   <vscale x 4 x i16>,
   i16,
   <vscale x 4 x i1>,
@@ -631,7 +631,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     i16 %1,
     <vscale x 4 x i1> %2,
@@ -640,7 +640,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
   <vscale x 8 x i16>,
   i16,
   <vscale x 8 x i1>,
@@ -651,7 +651,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     i16 %1,
     <vscale x 8 x i1> %2,
@@ -660,7 +660,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
   <vscale x 16 x i16>,
   i16,
   <vscale x 16 x i1>,
@@ -671,7 +671,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     i16 %1,
     <vscale x 16 x i1> %2,
@@ -680,7 +680,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16(
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
   <vscale x 32 x i16>,
   i16,
   <vscale x 32 x i1>,
@@ -691,7 +691,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16(
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     i16 %1,
     <vscale x 32 x i1> %2,
@@ -700,7 +700,7 @@ entry:
   ret <vscale x 32 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
@@ -711,7 +711,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     i32 %1,
     <vscale x 1 x i1> %2,
@@ -720,7 +720,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
@@ -731,7 +731,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     i32 %1,
     <vscale x 2 x i1> %2,
@@ -740,7 +740,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
@@ -751,7 +751,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     i32 %1,
     <vscale x 4 x i1> %2,
@@ -760,7 +760,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
@@ -771,7 +771,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     i32 %1,
     <vscale x 8 x i1> %2,
@@ -780,7 +780,7 @@ entry:
   ret <vscale x 8 x i1> %a
 }
 
-declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32(
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
   <vscale x 16 x i32>,
   i32,
   <vscale x 16 x i1>,
@@ -791,7 +791,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32(
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     i32 %1,
     <vscale x 16 x i1> %2,
@@ -800,7 +800,7 @@ entry:
   ret <vscale x 16 x i1> %a
 }
 
-declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i64(
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
@@ -811,7 +811,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i64(
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i64.i64(
     <vscale x 1 x i64> %0,
     i64 %1,
     <vscale x 1 x i1> %2,
@@ -820,7 +820,7 @@ entry:
   ret <vscale x 1 x i1> %a
 }
 
-declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i64(
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
@@ -831,7 +831,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i64(
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i64.i64(
     <vscale x 2 x i64> %0,
     i64 %1,
     <vscale x 2 x i1> %2,
@@ -840,7 +840,7 @@ entry:
   ret <vscale x 2 x i1> %a
 }
 
-declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i64(
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
@@ -851,7 +851,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i64(
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i64.i64(
     <vscale x 4 x i64> %0,
     i64 %1,
     <vscale x 4 x i1> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 4 x i1> %a
 }
 
-declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i64(
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
   <vscale x 8 x i64>,
   i64,
   <vscale x 8 x i1>,
@@ -871,7 +871,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
-  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i64(
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i64.i64(
     <vscale x 8 x i64> %0,
     i64 %1,
     <vscale x 8 x i1> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
index d22ac605a20b..ed540fad7c98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv32.ll
@@ -1,503 +1,503 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
   i8,
   i32);
 
-define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8_i8(i8 %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,mf8
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 %0,
     i32 %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
   i8,
   i32);
 
-define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8_i8(i8 %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,mf4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 %0,
     i32 %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
   i8,
   i32);
 
-define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8_i8(i8 %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,mf2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 %0,
     i32 %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
   i8,
   i32);
 
-define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8_i8(i8 %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,m1
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 %0,
     i32 %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
   i8,
   i32);
 
-define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8_i8(i8 %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,m2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 %0,
     i32 %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
   i8,
   i32);
 
-define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8_i8(i8 %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,m4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 %0,
     i32 %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
   i8,
   i32);
 
-define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8_i8(i8 %0, i32 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,m8
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 %0,
     i32 %1)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
   i16,
   i32);
 
-define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16_i16(i16 %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,mf4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 %0,
     i32 %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
   i16,
   i32);
 
-define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16_i16(i16 %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,mf2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 %0,
     i32 %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
   i16,
   i32);
 
-define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16_i16(i16 %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,m1
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 %0,
     i32 %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
   i16,
   i32);
 
-define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16_i16(i16 %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,m2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 %0,
     i32 %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
   i16,
   i32);
 
-define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16_i16(i16 %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,m4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 %0,
     i32 %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
   i16,
   i32);
 
-define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16_i16(i16 %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,m8
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 %0,
     i32 %1)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
   i32,
   i32);
 
-define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32_i32(i32 %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32
 ; CHECK:       vsetvli {{.*}}, a1, e32,mf2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 %0,
     i32 %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
   i32,
   i32);
 
-define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32_i32(i32 %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32
 ; CHECK:       vsetvli {{.*}}, a1, e32,m1
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 %0,
     i32 %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
   i32,
   i32);
 
-define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32_i32(i32 %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32
 ; CHECK:       vsetvli {{.*}}, a1, e32,m2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 %0,
     i32 %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
   i32,
   i32);
 
-define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32_i32(i32 %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32
 ; CHECK:       vsetvli {{.*}}, a1, e32,m4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 %0,
     i32 %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
   i32,
   i32);
 
-define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32_i32(i32 %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32
 ; CHECK:       vsetvli {{.*}}, a1, e32,m8
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 %0,
     i32 %1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8_i8(i32 %0) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf8
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 9,
     i32 %0)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8_i8(i32 %0) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 9,
     i32 %0)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8_i8(i32 %0) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 9,
     i32 %0)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8_i8(i32 %0) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,m1
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 9,
     i32 %0)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8_i8(i32 %0) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,m2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 9,
     i32 %0)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8_i8(i32 %0) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,m4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 9,
     i32 %0)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8_i8(i32 %0) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,m8
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 9,
     i32 %0)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16_i16(i32 %0) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 9,
     i32 %0)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16_i16(i32 %0) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 9,
     i32 %0)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16_i16(i32 %0) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m1
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 9,
     i32 %0)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16_i16(i32 %0) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 9,
     i32 %0)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16_i16(i32 %0) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 9,
     i32 %0)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16_i16(i32 %0) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m8
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 9,
     i32 %0)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32_i32(i32 %0) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32
 ; CHECK:       vsetvli {{.*}}, a0, e32,mf2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 9,
     i32 %0)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32_i32(i32 %0) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m1
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 9,
     i32 %0)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32_i32(i32 %0) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 9,
     i32 %0)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32_i32(i32 %0) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 9,
     i32 %0)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32_i32(i32 %0) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32(i32 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m8
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 9,
     i32 %0)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
index 21b22f6c3f0f..5b5e303c7b2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv.v.x-rv64.ll
@@ -1,615 +1,615 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+declare <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
   i8,
   i64);
 
-define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8_i8(i8 %0, i64 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmv.v.x_x_nxv1i8(i8 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,mf8
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 %0,
     i64 %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-declare <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+declare <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
   i8,
   i64);
 
-define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8_i8(i8 %0, i64 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmv.v.x_x_nxv2i8(i8 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,mf4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 %0,
     i64 %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-declare <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+declare <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
   i8,
   i64);
 
-define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8_i8(i8 %0, i64 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmv.v.x_x_nxv4i8(i8 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,mf2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 %0,
     i64 %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
   i8,
   i64);
 
-define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8_i8(i8 %0, i64 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmv.v.x_x_nxv8i8(i8 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,m1
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 %0,
     i64 %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+declare <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
   i8,
   i64);
 
-define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8_i8(i8 %0, i64 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmv.v.x_x_nxv16i8(i8 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,m2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 %0,
     i64 %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-declare <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+declare <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
   i8,
   i64);
 
-define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8_i8(i8 %0, i64 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmv.v.x_x_nxv32i8(i8 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,m4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 %0,
     i64 %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-declare <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+declare <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
   i8,
   i64);
 
-define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8_i8(i8 %0, i64 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmv.v.x_x_nxv64i8(i8 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv64i8
 ; CHECK:       vsetvli {{.*}}, a1, e8,m8
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 %0,
     i64 %1)
 
   ret <vscale x 64 x i8> %a
 }
 
-declare <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+declare <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
   i16,
   i64);
 
-define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16_i16(i16 %0, i64 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmv.v.x_x_nxv1i16(i16 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,mf4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 %0,
     i64 %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-declare <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+declare <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
   i16,
   i64);
 
-define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16_i16(i16 %0, i64 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmv.v.x_x_nxv2i16(i16 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,mf2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 %0,
     i64 %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+declare <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
   i16,
   i64);
 
-define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16_i16(i16 %0, i64 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmv.v.x_x_nxv4i16(i16 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,m1
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 %0,
     i64 %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+declare <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
   i16,
   i64);
 
-define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16_i16(i16 %0, i64 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmv.v.x_x_nxv8i16(i16 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,m2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 %0,
     i64 %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-declare <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+declare <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
   i16,
   i64);
 
-define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16_i16(i16 %0, i64 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmv.v.x_x_nxv16i16(i16 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,m4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 %0,
     i64 %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-declare <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+declare <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
   i16,
   i64);
 
-define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16_i16(i16 %0, i64 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmv.v.x_x_nxv32i16(i16 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv32i16
 ; CHECK:       vsetvli {{.*}}, a1, e16,m8
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 %0,
     i64 %1)
 
   ret <vscale x 32 x i16> %a
 }
 
-declare <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+declare <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
   i32,
   i64);
 
-define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32_i32(i32 %0, i64 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmv.v.x_x_nxv1i32(i32 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i32
 ; CHECK:       vsetvli {{.*}}, a1, e32,mf2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 %0,
     i64 %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+declare <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
   i32,
   i64);
 
-define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32_i32(i32 %0, i64 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmv.v.x_x_nxv2i32(i32 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i32
 ; CHECK:       vsetvli {{.*}}, a1, e32,m1
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 %0,
     i64 %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+declare <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
   i32,
   i64);
 
-define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32_i32(i32 %0, i64 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmv.v.x_x_nxv4i32(i32 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i32
 ; CHECK:       vsetvli {{.*}}, a1, e32,m2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 %0,
     i64 %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-declare <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+declare <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
   i32,
   i64);
 
-define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32_i32(i32 %0, i64 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmv.v.x_x_nxv8i32(i32 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i32
 ; CHECK:       vsetvli {{.*}}, a1, e32,m4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 %0,
     i64 %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-declare <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+declare <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
   i32,
   i64);
 
-define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32_i32(i32 %0, i64 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmv.v.x_x_nxv16i32(i32 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv16i32
 ; CHECK:       vsetvli {{.*}}, a1, e32,m8
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 %0,
     i64 %1)
 
   ret <vscale x 16 x i32> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(
+declare <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
   i64,
   i64);
 
-define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64_i64(i64 %0, i64 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64(i64 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i64_i64
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv1i64
 ; CHECK:       vsetvli {{.*}}, a1, e64,m1
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
     i64 %0,
     i64 %1)
 
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(
+declare <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
   i64,
   i64);
 
-define <vscale x 2 x i64> @intrinsic_vmv.v.x_x_nxv2i64_i64(i64 %0, i64 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmv.v.x_x_nxv2i64(i64 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i64_i64
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv2i64
 ; CHECK:       vsetvli {{.*}}, a1, e64,m2
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
     i64 %0,
     i64 %1)
 
   ret <vscale x 2 x i64> %a
 }
 
-declare <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(
+declare <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
   i64,
   i64);
 
-define <vscale x 4 x i64> @intrinsic_vmv.v.x_x_nxv4i64_i64(i64 %0, i64 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmv.v.x_x_nxv4i64(i64 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i64_i64
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv4i64
 ; CHECK:       vsetvli {{.*}}, a1, e64,m4
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
     i64 %0,
     i64 %1)
 
   ret <vscale x 4 x i64> %a
 }
 
-declare <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(
+declare <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
   i64,
   i64);
 
-define <vscale x 8 x i64> @intrinsic_vmv.v.x_x_nxv8i64_i64(i64 %0, i64 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmv.v.x_x_nxv8i64(i64 %0, i64 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i64_i64
+; CHECK-LABEL: intrinsic_vmv.v.x_x_nxv8i64
 ; CHECK:       vsetvli {{.*}}, a1, e64,m8
 ; CHECK:       vmv.v.x {{v[0-9]+}}, a0
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
     i64 %0,
     i64 %1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8_i8(i64 %0) nounwind {
+define <vscale x 1 x i8> @intrinsic_vmv.v.x_i_nxv1i8(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf8
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8(
     i8 9,
     i64 %0)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8_i8(i64 %0) nounwind {
+define <vscale x 2 x i8> @intrinsic_vmv.v.x_i_nxv2i8(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8(
     i8 9,
     i64 %0)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8_i8(i64 %0) nounwind {
+define <vscale x 4 x i8> @intrinsic_vmv.v.x_i_nxv4i8(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8(
     i8 9,
     i64 %0)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8_i8(i64 %0) nounwind {
+define <vscale x 8 x i8> @intrinsic_vmv.v.x_i_nxv8i8(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,m1
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(
     i8 9,
     i64 %0)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8_i8(i64 %0) nounwind {
+define <vscale x 16 x i8> @intrinsic_vmv.v.x_i_nxv16i8(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,m2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8(
     i8 9,
     i64 %0)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8_i8(i64 %0) nounwind {
+define <vscale x 32 x i8> @intrinsic_vmv.v.x_i_nxv32i8(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,m4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8(
     i8 9,
     i64 %0)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8_i8(i64 %0) nounwind {
+define <vscale x 64 x i8> @intrinsic_vmv.v.x_i_nxv64i8(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8_i8
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv64i8
 ; CHECK:       vsetvli {{.*}}, a0, e8,m8
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8(
     i8 9,
     i64 %0)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16_i16(i64 %0) nounwind {
+define <vscale x 1 x i16> @intrinsic_vmv.v.x_i_nxv1i16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16(
     i16 9,
     i64 %0)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16_i16(i64 %0) nounwind {
+define <vscale x 2 x i16> @intrinsic_vmv.v.x_i_nxv2i16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,mf2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16(
     i16 9,
     i64 %0)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16_i16(i64 %0) nounwind {
+define <vscale x 4 x i16> @intrinsic_vmv.v.x_i_nxv4i16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m1
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16(
     i16 9,
     i64 %0)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16_i16(i64 %0) nounwind {
+define <vscale x 8 x i16> @intrinsic_vmv.v.x_i_nxv8i16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16(
     i16 9,
     i64 %0)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16_i16(i64 %0) nounwind {
+define <vscale x 16 x i16> @intrinsic_vmv.v.x_i_nxv16i16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16(
     i16 9,
     i64 %0)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16_i16(i64 %0) nounwind {
+define <vscale x 32 x i16> @intrinsic_vmv.v.x_i_nxv32i16(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16_i16
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv32i16
 ; CHECK:       vsetvli {{.*}}, a0, e16,m8
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16(
     i16 9,
     i64 %0)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32_i32(i64 %0) nounwind {
+define <vscale x 1 x i32> @intrinsic_vmv.v.x_i_nxv1i32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i32
 ; CHECK:       vsetvli {{.*}}, a0, e32,mf2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32(
     i32 9,
     i64 %0)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32_i32(i64 %0) nounwind {
+define <vscale x 2 x i32> @intrinsic_vmv.v.x_i_nxv2i32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m1
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32(
     i32 9,
     i64 %0)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32_i32(i64 %0) nounwind {
+define <vscale x 4 x i32> @intrinsic_vmv.v.x_i_nxv4i32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32(
     i32 9,
     i64 %0)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32_i32(i64 %0) nounwind {
+define <vscale x 8 x i32> @intrinsic_vmv.v.x_i_nxv8i32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32(
     i32 9,
     i64 %0)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32_i32(i64 %0) nounwind {
+define <vscale x 16 x i32> @intrinsic_vmv.v.x_i_nxv16i32(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32_i32
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv16i32
 ; CHECK:       vsetvli {{.*}}, a0, e32,m8
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32(
     i32 9,
     i64 %0)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64_i64(i64 %0) nounwind {
+define <vscale x 1 x i64> @intrinsic_vmv.v.x_i_nxv1i64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64_i64
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv1i64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m1
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64(
     i64 9,
     i64 %0)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64_i64(i64 %0) nounwind {
+define <vscale x 2 x i64> @intrinsic_vmv.v.x_i_nxv2i64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64_i64
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv2i64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m2
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64(
     i64 9,
     i64 %0)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64_i64(i64 %0) nounwind {
+define <vscale x 4 x i64> @intrinsic_vmv.v.x_i_nxv4i64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64_i64
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv4i64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m4
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64(
     i64 9,
     i64 %0)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64_i64(i64 %0) nounwind {
+define <vscale x 8 x i64> @intrinsic_vmv.v.x_i_nxv8i64(i64 %0) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64_i64
+; CHECK-LABEL: intrinsic_vmv.v.x_i_nxv8i64
 ; CHECK:       vsetvli {{.*}}, a0, e64,m8
 ; CHECK:       vmv.v.i {{v[0-9]+}}, 9
-  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64(
     i64 9,
     i64 %0)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
index a12745886ea1..b9e88f1673f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll
@@ -1,32 +1,32 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
-declare i32 @llvm.riscv.vpopc.i64.nxv1i1(
+declare i32 @llvm.riscv.vpopc.i32.nxv1i1(
   <vscale x 1 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_m_i64_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vpopc_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv1i1
+; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv1i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vpopc.i64.nxv1i1(
+  %a = call i32 @llvm.riscv.vpopc.i32.nxv1i1(
     <vscale x 1 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.mask.i64.nxv1i1(
+declare i32 @llvm.riscv.vpopc.mask.i32.nxv1i1(
   <vscale x 1 x i1>,
   <vscale x 1 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vpopc_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv1i1
+; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv1i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv1i1(
+  %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv1i1(
     <vscale x 1 x i1> %0,
     <vscale x 1 x i1> %1,
     i32 %2)
@@ -34,33 +34,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.i64.nxv2i1(
+declare i32 @llvm.riscv.vpopc.i32.nxv2i1(
   <vscale x 2 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_m_i64_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vpopc_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv2i1
+; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv2i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vpopc.i64.nxv2i1(
+  %a = call i32 @llvm.riscv.vpopc.i32.nxv2i1(
     <vscale x 2 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.mask.i64.nxv2i1(
+declare i32 @llvm.riscv.vpopc.mask.i32.nxv2i1(
   <vscale x 2 x i1>,
   <vscale x 2 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vpopc_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv2i1
+; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv2i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv2i1(
+  %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv2i1(
     <vscale x 2 x i1> %0,
     <vscale x 2 x i1> %1,
     i32 %2)
@@ -68,33 +68,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.i64.nxv4i1(
+declare i32 @llvm.riscv.vpopc.i32.nxv4i1(
   <vscale x 4 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_m_i64_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vpopc_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv4i1
+; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv4i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vpopc.i64.nxv4i1(
+  %a = call i32 @llvm.riscv.vpopc.i32.nxv4i1(
     <vscale x 4 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.mask.i64.nxv4i1(
+declare i32 @llvm.riscv.vpopc.mask.i32.nxv4i1(
   <vscale x 4 x i1>,
   <vscale x 4 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vpopc_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv4i1
+; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv4i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv4i1(
+  %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv4i1(
     <vscale x 4 x i1> %0,
     <vscale x 4 x i1> %1,
     i32 %2)
@@ -102,33 +102,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.i64.nxv8i1(
+declare i32 @llvm.riscv.vpopc.i32.nxv8i1(
   <vscale x 8 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_m_i64_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vpopc_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv8i1
+; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv8i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vpopc.i64.nxv8i1(
+  %a = call i32 @llvm.riscv.vpopc.i32.nxv8i1(
     <vscale x 8 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.mask.i64.nxv8i1(
+declare i32 @llvm.riscv.vpopc.mask.i32.nxv8i1(
   <vscale x 8 x i1>,
   <vscale x 8 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vpopc_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv8i1
+; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv8i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv8i1(
+  %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv8i1(
     <vscale x 8 x i1> %0,
     <vscale x 8 x i1> %1,
     i32 %2)
@@ -136,33 +136,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.i64.nxv16i1(
+declare i32 @llvm.riscv.vpopc.i32.nxv16i1(
   <vscale x 16 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_m_i64_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vpopc_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv16i1
+; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv16i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vpopc.i64.nxv16i1(
+  %a = call i32 @llvm.riscv.vpopc.i32.nxv16i1(
     <vscale x 16 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.mask.i64.nxv16i1(
+declare i32 @llvm.riscv.vpopc.mask.i32.nxv16i1(
   <vscale x 16 x i1>,
   <vscale x 16 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vpopc_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv16i1
+; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv16i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv16i1(
+  %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv16i1(
     <vscale x 16 x i1> %0,
     <vscale x 16 x i1> %1,
     i32 %2)
@@ -170,33 +170,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.i64.nxv32i1(
+declare i32 @llvm.riscv.vpopc.i32.nxv32i1(
   <vscale x 32 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_m_i64_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vpopc_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv32i1
+; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv32i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vpopc.i64.nxv32i1(
+  %a = call i32 @llvm.riscv.vpopc.i32.nxv32i1(
     <vscale x 32 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.mask.i64.nxv32i1(
+declare i32 @llvm.riscv.vpopc.mask.i32.nxv32i1(
   <vscale x 32 x i1>,
   <vscale x 32 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vpopc_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv32i1
+; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv32i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m4,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv32i1(
+  %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv32i1(
     <vscale x 32 x i1> %0,
     <vscale x 32 x i1> %1,
     i32 %2)
@@ -204,33 +204,33 @@ entry:
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.i64.nxv64i1(
+declare i32 @llvm.riscv.vpopc.i32.nxv64i1(
   <vscale x 64 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_m_i64_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+define i32 @intrinsic_vpopc_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv64i1
+; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv64i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}
-  %a = call i32 @llvm.riscv.vpopc.i64.nxv64i1(
+  %a = call i32 @llvm.riscv.vpopc.i32.nxv64i1(
     <vscale x 64 x i1> %0,
     i32 %1)
 
   ret i32 %a
 }
 
-declare i32 @llvm.riscv.vpopc.mask.i64.nxv64i1(
+declare i32 @llvm.riscv.vpopc.mask.i32.nxv64i1(
   <vscale x 64 x i1>,
   <vscale x 64 x i1>,
   i32);
 
-define i32 @intrinsic_vpopc_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+define i32 @intrinsic_vpopc_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
 entry:
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv64i1
+; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv64i1
 ; CHECK:       vsetvli {{.*}}, a0, e8,m8,ta,mu
 ; CHECK:       vpopc.m a0, {{v[0-9]+}}, v0.t
-  %a = call i32 @llvm.riscv.vpopc.mask.i64.nxv64i1(
+  %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv64i1(
     <vscale x 64 x i1> %0,
     <vscale x 64 x i1> %1,
     i32 %2)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
index 305efe880361..ff7a2f858dc5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
index eeeeb9242467..b2e8230ab22e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredand-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
   <vscale x 1 x i64>,
   <vscale x 2 x i64>,
   <vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
   <vscale x 1 x i64>,
   <vscale x 4 x i64>,
   <vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
   <vscale x 1 x i64>,
   <vscale x 8 x i64>,
   <vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vredand.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 1 x i64> %2,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
index 5a2c7ab416c7..798211e73c35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
index 939ae4599031..9dbddfa17670 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmax-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
   <vscale x 1 x i64>,
   <vscale x 2 x i64>,
   <vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
   <vscale x 1 x i64>,
   <vscale x 4 x i64>,
   <vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
   <vscale x 1 x i64>,
   <vscale x 8 x i64>,
   <vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vredmax.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 1 x i64> %2,

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
index 734fc33ba4fd..a25614f1f213 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
index 68af5ac3897a..f05d58998aa4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmaxu-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
   <vscale x 1 x i64>,
   <vscale x 2 x i64>,
   <vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
   <vscale x 1 x i64>,
   <vscale x 4 x i64>,
   <vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
   <vscale x 1 x i64>,
   <vscale x 8 x i64>,
   <vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vredmaxu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 1 x i64> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
index 6da1738afe20..732b354aff3c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
index 6c6bc85ca159..0f3ac5b21657 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredmin-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
   <vscale x 1 x i64>,
   <vscale x 2 x i64>,
   <vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
   <vscale x 1 x i64>,
   <vscale x 4 x i64>,
   <vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
   <vscale x 1 x i64>,
   <vscale x 8 x i64>,
   <vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vredmin.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 1 x i64> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
index 9f26f5dea491..66c3d0714352 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
index 559a870c8354..3fe847466db7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredminu-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
   <vscale x 1 x i64>,
   <vscale x 2 x i64>,
   <vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
   <vscale x 1 x i64>,
   <vscale x 4 x i64>,
   <vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
   <vscale x 1 x i64>,
   <vscale x 8 x i64>,
   <vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vredminu.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 1 x i64> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
index 5b0e2f099675..8aeb10b90e9c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
index 9b8022081411..caeda82fa5ae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredor-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
   <vscale x 1 x i64>,
   <vscale x 2 x i64>,
   <vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
   <vscale x 1 x i64>,
   <vscale x 4 x i64>,
   <vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
   <vscale x 1 x i64>,
   <vscale x 8 x i64>,
   <vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vredor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 1 x i64> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
index b2b95979526d..ddc920c681ac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
index 7641b6ca0a4d..68f8b173173f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredsum-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
   <vscale x 1 x i64>,
   <vscale x 2 x i64>,
   <vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
   <vscale x 1 x i64>,
   <vscale x 4 x i64>,
   <vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
   <vscale x 1 x i64>,
   <vscale x 8 x i64>,
   <vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vredsum.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 1 x i64> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
index 3ea6a2532085..37aeb333a456 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv32.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,

diff --git a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
index b61705eb8159..9683a3b29056 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vredxor-rv64.ll
@@ -20,7 +20,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
   <vscale x 8 x i8>,
   <vscale x 1 x i8>,
   <vscale x 8 x i8>,
@@ -32,7 +32,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.nxv1i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8(
     <vscale x 8 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -62,7 +62,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
   <vscale x 8 x i8>,
   <vscale x 2 x i8>,
   <vscale x 8 x i8>,
@@ -74,7 +74,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.nxv2i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8(
     <vscale x 8 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -104,7 +104,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
   <vscale x 8 x i8>,
   <vscale x 4 x i8>,
   <vscale x 8 x i8>,
@@ -116,7 +116,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.nxv4i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8(
     <vscale x 8 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -146,7 +146,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
@@ -158,7 +158,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.nxv8i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -188,7 +188,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
   <vscale x 8 x i8>,
   <vscale x 16 x i8>,
   <vscale x 8 x i8>,
@@ -200,7 +200,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.nxv16i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8(
     <vscale x 8 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -230,7 +230,7 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+declare <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
   <vscale x 8 x i8>,
   <vscale x 32 x i8>,
   <vscale x 8 x i8>,
@@ -242,7 +242,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.nxv32i1(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8(
     <vscale x 8 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 8 x i8> %2,
@@ -272,7 +272,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
   <vscale x 4 x i16>,
   <vscale x 1 x i16>,
   <vscale x 4 x i16>,
@@ -284,7 +284,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.nxv1i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16(
     <vscale x 4 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -314,7 +314,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
   <vscale x 4 x i16>,
   <vscale x 2 x i16>,
   <vscale x 4 x i16>,
@@ -326,7 +326,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.nxv2i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16(
     <vscale x 4 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -356,7 +356,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
@@ -368,7 +368,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.nxv4i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -398,7 +398,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
   <vscale x 4 x i16>,
   <vscale x 8 x i16>,
   <vscale x 4 x i16>,
@@ -410,7 +410,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.nxv8i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16(
     <vscale x 4 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -440,7 +440,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
   <vscale x 4 x i16>,
   <vscale x 16 x i16>,
   <vscale x 4 x i16>,
@@ -452,7 +452,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.nxv16i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16(
     <vscale x 4 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -482,7 +482,7 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+declare <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
   <vscale x 4 x i16>,
   <vscale x 32 x i16>,
   <vscale x 4 x i16>,
@@ -494,7 +494,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.nxv32i1(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16(
     <vscale x 4 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 4 x i16> %2,
@@ -524,7 +524,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
   <vscale x 2 x i32>,
   <vscale x 1 x i32>,
   <vscale x 2 x i32>,
@@ -536,7 +536,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.nxv1i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32(
     <vscale x 2 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -566,7 +566,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
@@ -578,7 +578,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.nxv2i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -608,7 +608,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
   <vscale x 2 x i32>,
   <vscale x 4 x i32>,
   <vscale x 2 x i32>,
@@ -620,7 +620,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.nxv4i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32(
     <vscale x 2 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -650,7 +650,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
   <vscale x 2 x i32>,
   <vscale x 8 x i32>,
   <vscale x 2 x i32>,
@@ -662,7 +662,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.nxv8i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32(
     <vscale x 2 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -692,7 +692,7 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+declare <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
   <vscale x 2 x i32>,
   <vscale x 16 x i32>,
   <vscale x 2 x i32>,
@@ -704,7 +704,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.nxv16i1(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32(
     <vscale x 2 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 2 x i32> %2,
@@ -734,7 +734,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.nxv1i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
@@ -746,7 +746,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.nxv1i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -776,7 +776,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.nxv2i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
   <vscale x 1 x i64>,
   <vscale x 2 x i64>,
   <vscale x 1 x i64>,
@@ -788,7 +788,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.nxv2i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64(
     <vscale x 1 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -818,7 +818,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.nxv4i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
   <vscale x 1 x i64>,
   <vscale x 4 x i64>,
   <vscale x 1 x i64>,
@@ -830,7 +830,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.nxv4i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64(
     <vscale x 1 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 1 x i64> %2,
@@ -860,7 +860,7 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.nxv8i1(
+declare <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
   <vscale x 1 x i64>,
   <vscale x 8 x i64>,
   <vscale x 1 x i64>,
@@ -872,7 +872,7 @@ entry:
 ; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
 ; CHECK:       vredxor.vs {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
-  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.nxv8i1(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64(
     <vscale x 1 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 1 x i64> %2,
