[llvm] 41ff1e0 - [RISCV] Improve register allocation for masked vwadd(u).wv, vwsub(u).wv, vfwadd.wv, and vfwsub.wv.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 1 19:04:49 PDT 2021


Author: Craig Topper
Date: 2021-06-01T18:59:00-07:00
New Revision: 41ff1e0e29bbd3c12c8fdb0a0bbed8bda95dec85

URL: https://github.com/llvm/llvm-project/commit/41ff1e0e29bbd3c12c8fdb0a0bbed8bda95dec85
DIFF: https://github.com/llvm/llvm-project/commit/41ff1e0e29bbd3c12c8fdb0a0bbed8bda95dec85.diff

LOG: [RISCV] Improve register allocation for masked vwadd(u).wv, vwsub(u).wv, vfwadd.wv, and vfwsub.wv.

The first source operand has the same EEW as the destination, but we
apply an earlyclobber constraint, which prevents the two from ever
being assigned the same register.

To work around this, add a special TIED pseudo to use whenever the
first source and merge operand are the same value. This lets us use a
single operand for the merge operand and first source, which we can
then tie to the destination. A tied source disables earlyclobber for
that operand.
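
As an illustration (a minimal sketch distilled from the new "tie" test
cases below; the function name and the exact intrinsic declaration are
reconstructed from the call sites and are illustrative only), a masked
call that passes the same wide value as both the merge operand and the
first source can now select the TIED pseudo directly:

  declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i8>,
    <vscale x 1 x i1>, i32)

  define <vscale x 1 x i16> @tie_example(<vscale x 1 x i16> %wide,
                                         <vscale x 1 x i8> %narrow,
                                         <vscale x 1 x i1> %mask,
                                         i32 %vl) {
    ; Merge operand and first source are both %wide, so the destination
    ; can be tied to that register. Codegen becomes a single
    ;   vwadd.wv v8, v8, v9, v0.t
    ; instead of a vmv1r.v copy before and after the vwadd.wv.
    %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
      <vscale x 1 x i16> %wide,
      <vscale x 1 x i16> %wide,
      <vscale x 1 x i8> %narrow,
      <vscale x 1 x i1> %mask,
      i32 %vl)
    ret <vscale x 1 x i16> %a
  }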

Reviewed By: arcbbb

Differential Revision: https://reviews.llvm.org/D103211

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 7715c2e91ef0e..9e5720cfe516d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -577,10 +577,11 @@ class PseudoToVInst<string PseudoInst> {
                  !subst("_B64", "",
                  !subst("_MASK", "",
                  !subst("_COMMUTABLE", "",
+                 !subst("_TIED", "",
                  !subst("F16", "F",
                  !subst("F32", "F",
                  !subst("F64", "F",
-                 !subst("Pseudo", "", PseudoInst))))))))))))))))))));
+                 !subst("Pseudo", "", PseudoInst)))))))))))))))))))));
 }
 
 // The destination vector register group for a masked vector instruction cannot
@@ -975,6 +976,27 @@ class VPseudoBinaryMOutMask<VReg RetClass,
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+// Special version of VPseudoBinaryMask where we pretend the first source is
+// tied to the destination so we can work around the earlyclobber constraint.
+// This allows maskedoff and rs2 to be the same register.
+class VPseudoTiedBinaryMask<VReg RetClass,
+                            DAGOperand Op2Class,
+                            string Constraint> :
+        Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+                (ins GetVRegNoV0<RetClass>.R:$merge,
+                     Op2Class:$rs1,
+                     VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 0; // Merge is also rs2.
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoBinaryCarryIn<VReg RetClass,
                            VReg Op1Class,
                            DAGOperand Op2Class,
@@ -1482,6 +1504,16 @@ multiclass VPseudoBinaryEmul<VReg RetClass,
   }
 }
 
+multiclass VPseudoTiedBinary<VReg RetClass,
+                             DAGOperand Op2Class,
+                             LMULInfo MInfo,
+                             string Constraint = ""> {
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX # "_MASK_TIED" : VPseudoTiedBinaryMask<RetClass, Op2Class,
+                                                         Constraint>;
+  }
+}
+
 multiclass VPseudoBinaryV_VV<string Constraint = ""> {
   foreach m = MxList.m in
     defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
@@ -1554,9 +1586,12 @@ multiclass VPseudoBinaryW_VF {
 }
 
 multiclass VPseudoBinaryW_WV {
-  foreach m = MxListW.m in
+  foreach m = MxListW.m in {
     defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
                              "@earlyclobber $rd">;
+    defm _WV : VPseudoTiedBinary<m.wvrclass, m.vrclass, m,
+                                 "@earlyclobber $rd">;
+  }
 }
 
 multiclass VPseudoBinaryW_WX {
@@ -2260,6 +2295,25 @@ class VPatBinaryMaskSwapped<string intrinsic_name,
                    (op2_type op2_kind:$rs2),
                    (mask_type V0), GPR:$vl, sew)>;
 
+class VPatTiedBinaryMask<string intrinsic_name,
+                         string inst,
+                         ValueType result_type,
+                         ValueType op2_type,
+                         ValueType mask_type,
+                         int sew,
+                         VReg result_reg_class,
+                         DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                   (result_type result_reg_class:$merge),
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_kind:$rs2),
+                   (mask_type V0),
+                   (XLenVT (VLOp GPR:$vl)))),
+                   (!cast<Instruction>(inst#"_MASK_TIED")
+                   (result_type result_reg_class:$merge),
+                   (op2_type op2_kind:$rs2),
+                   (mask_type V0), GPR:$vl, sew)>;
+
 class VPatTernaryNoMask<string intrinsic,
                         string inst,
                         string kind,
@@ -2642,6 +2696,10 @@ multiclass VPatBinaryW_WV<string intrinsic, string instruction,
   foreach VtiToWti = vtilist in {
     defvar Vti = VtiToWti.Vti;
     defvar Wti = VtiToWti.Wti;
+    let AddedComplexity = 1 in
+    def : VPatTiedBinaryMask<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
+                             Wti.Vector, Vti.Vector, Vti.Mask,
+                             Vti.Log2SEW, Wti.RegClass, Vti.RegClass>;
     defm : VPatBinary<intrinsic, instruction # "_WV_" # Vti.LMul.MX,
                       Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
                       Vti.Log2SEW, Wti.RegClass,

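For one concrete instantiation, here is a sketch of what the
multiclasses above expand to for vwadd.wv at LMUL=1 (the record is
written out by hand for illustration; the name and classes are derived
from the definitions above, not copied from TableGen output):

  // Wide destination/merge class is VRM2; the narrow source class is VR.
  def PseudoVWADD_WV_M1_MASK_TIED :
      VPseudoTiedBinaryMask<VRM2, VR, "@earlyclobber $rd">;
  // Constraints = "@earlyclobber $rd,$rd = $merge": the tie exempts the
  // merge/first-source operand from the earlyclobber, while the narrow
  // source still cannot overlap the destination register group.

The AddedComplexity = 1 on VPatTiedBinaryMask makes it win over the
generic VPatBinary mask pattern whenever the merge operand and first
source are the same value.
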
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
index d9967d723eaad..bcbd3064454a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
@@ -826,9 +826,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
@@ -845,9 +843,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
@@ -864,9 +860,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vfwadd.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vfwadd.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
@@ -883,9 +877,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vfwadd.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vfwadd.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
@@ -902,9 +894,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_n
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vfwadd.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vfwadd.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
@@ -921,9 +911,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
@@ -940,9 +928,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vfwadd.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vfwadd.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
@@ -959,9 +945,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vfwadd.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vfwadd.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
@@ -978,9 +962,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vfwadd.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vfwadd.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
index 6f56e87524058..a4a499597f440 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
@@ -826,9 +826,7 @@ define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
@@ -845,9 +843,7 @@ define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
@@ -864,9 +860,7 @@ define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vfwadd.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vfwadd.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
@@ -883,9 +877,7 @@ define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vfwadd.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vfwadd.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
@@ -902,9 +894,7 @@ define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_n
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vfwadd.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vfwadd.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
@@ -921,9 +911,7 @@ define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
@@ -940,9 +928,7 @@ define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vfwadd.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vfwadd.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
@@ -959,9 +945,7 @@ define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vfwadd.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vfwadd.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
@@ -978,9 +962,7 @@ define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv
 ; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vfwadd.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vfwadd.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
index 7683a9ddc2616..69b64abb7d0b1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
@@ -826,9 +826,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
@@ -845,9 +843,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
@@ -864,9 +860,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vfwsub.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vfwsub.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
@@ -883,9 +877,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vfwsub.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vfwsub.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
@@ -902,9 +894,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_n
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vfwsub.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vfwsub.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
@@ -921,9 +911,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
@@ -940,9 +928,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vfwsub.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vfwsub.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
@@ -959,9 +945,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vfwsub.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vfwsub.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
@@ -978,9 +962,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vfwsub.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vfwsub.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
index 2c0b03998a5e5..f3782cad04dc3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
@@ -826,9 +826,7 @@ define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
@@ -845,9 +843,7 @@ define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
@@ -864,9 +860,7 @@ define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vfwsub.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vfwsub.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
@@ -883,9 +877,7 @@ define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vfwsub.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vfwsub.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
@@ -902,9 +894,7 @@ define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_n
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vfwsub.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vfwsub.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
@@ -921,9 +911,7 @@ define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vfwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
@@ -940,9 +928,7 @@ define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vfwsub.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vfwsub.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
@@ -959,9 +945,7 @@ define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vfwsub.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vfwsub.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
@@ -978,9 +962,7 @@ define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv
 ; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vfwsub.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vfwsub.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
index 0af7d4cd9df4f..c56f4f4592a7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
@@ -1343,9 +1343,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
@@ -1362,9 +1360,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
@@ -1381,9 +1377,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
@@ -1400,9 +1394,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
@@ -1419,9 +1411,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv1
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
@@ -1438,9 +1428,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv3
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
@@ -1457,9 +1445,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
@@ -1476,9 +1462,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
@@ -1495,9 +1479,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
@@ -1514,9 +1496,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
@@ -1533,9 +1513,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv1
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
@@ -1552,9 +1530,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
@@ -1571,9 +1547,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
@@ -1590,9 +1564,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
@@ -1609,9 +1581,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
index 52df485521243..fc03c38e19df2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
@@ -1343,9 +1343,7 @@ define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
@@ -1362,9 +1360,7 @@ define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
@@ -1381,9 +1377,7 @@ define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
@@ -1400,9 +1394,7 @@ define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
@@ -1419,9 +1411,7 @@ define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv1
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
@@ -1438,9 +1428,7 @@ define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv3
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
@@ -1457,9 +1445,7 @@ define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
@@ -1476,9 +1462,7 @@ define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
@@ -1495,9 +1479,7 @@ define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
@@ -1514,9 +1496,7 @@ define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
@@ -1533,9 +1513,7 @@ define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv1
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
@@ -1552,9 +1530,7 @@ define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
@@ -1571,9 +1547,7 @@ define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
@@ -1590,9 +1564,7 @@ define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
@@ -1609,9 +1581,7 @@ define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32
 ; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
index 5939659eec4ad..dcbe23843d03b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
@@ -1343,9 +1343,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
@@ -1362,9 +1360,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
@@ -1381,9 +1377,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
@@ -1400,9 +1394,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwaddu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
@@ -1419,9 +1411,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwaddu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
@@ -1438,9 +1428,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwaddu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
@@ -1457,9 +1445,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i1
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
@@ -1476,9 +1462,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i1
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
@@ -1495,9 +1479,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i1
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwaddu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
@@ -1514,9 +1496,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i1
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwaddu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
@@ -1533,9 +1513,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwaddu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
@@ -1552,9 +1530,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i3
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
@@ -1571,9 +1547,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i3
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwaddu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
@@ -1590,9 +1564,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i3
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwaddu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
@@ -1609,9 +1581,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i3
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwaddu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
index 3a78f20e735a5..e5bb4a3c1b966 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
@@ -1343,9 +1343,7 @@ define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
@@ -1362,9 +1360,7 @@ define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
@@ -1381,9 +1377,7 @@ define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
@@ -1400,9 +1394,7 @@ define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwaddu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
@@ -1419,9 +1411,7 @@ define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwaddu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
@@ -1438,9 +1428,7 @@ define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwaddu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
@@ -1457,9 +1445,7 @@ define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i1
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
@@ -1476,9 +1462,7 @@ define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i1
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
@@ -1495,9 +1479,7 @@ define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i1
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwaddu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
@@ -1514,9 +1496,7 @@ define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i1
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwaddu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
@@ -1533,9 +1513,7 @@ define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwaddu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
@@ -1552,9 +1530,7 @@ define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i3
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwaddu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
@@ -1571,9 +1547,7 @@ define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i3
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwaddu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
@@ -1590,9 +1564,7 @@ define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i3
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwaddu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
@@ -1609,9 +1581,7 @@ define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i3
 ; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwaddu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
index 4f999e8a4b733..8f58e8c0b5d4c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
@@ -1343,9 +1343,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
@@ -1362,9 +1360,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
@@ -1381,9 +1377,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
@@ -1400,9 +1394,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
@@ -1419,9 +1411,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv1
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
@@ -1438,9 +1428,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv3
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
@@ -1457,9 +1445,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
@@ -1476,9 +1462,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
@@ -1495,9 +1479,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
@@ -1514,9 +1496,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
@@ -1533,9 +1513,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv1
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
@@ -1552,9 +1530,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
@@ -1571,9 +1547,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
@@ -1590,9 +1564,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
@@ -1609,9 +1581,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
index 9005395694e04..76dcdeba03586 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
@@ -1343,9 +1343,7 @@ define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
@@ -1362,9 +1360,7 @@ define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
@@ -1381,9 +1377,7 @@ define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
@@ -1400,9 +1394,7 @@ define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
@@ -1419,9 +1411,7 @@ define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv1
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
@@ -1438,9 +1428,7 @@ define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv3
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
@@ -1457,9 +1445,7 @@ define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
@@ -1476,9 +1462,7 @@ define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
@@ -1495,9 +1479,7 @@ define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
@@ -1514,9 +1496,7 @@ define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
@@ -1533,9 +1513,7 @@ define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv1
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
@@ -1552,9 +1530,7 @@ define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsub.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
@@ -1571,9 +1547,7 @@ define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsub.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
@@ -1590,9 +1564,7 @@ define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsub.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
@@ -1609,9 +1581,7 @@ define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32
 ; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsub.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
index 49bebf1b294c9..79b858d51a261 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
@@ -1343,9 +1343,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
@@ -1362,9 +1360,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
@@ -1381,9 +1377,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
@@ -1400,9 +1394,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
@@ -1419,9 +1411,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
@@ -1438,9 +1428,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
@@ -1457,9 +1445,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i1
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
@@ -1476,9 +1462,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i1
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
@@ -1495,9 +1479,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i1
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
@@ -1514,9 +1496,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i1
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
@@ -1533,9 +1513,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
@@ -1552,9 +1530,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i3
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
@@ -1571,9 +1547,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i3
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
@@ -1590,9 +1564,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i3
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
@@ -1609,9 +1581,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i3
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
index 9f00028f0a7de..2152b4d44d82c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
@@ -1343,9 +1343,7 @@ define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
@@ -1362,9 +1360,7 @@ define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
@@ -1381,9 +1377,7 @@ define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
@@ -1400,9 +1394,7 @@ define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
@@ -1419,9 +1411,7 @@ define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
@@ -1438,9 +1428,7 @@ define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
@@ -1457,9 +1445,7 @@ define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i1
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
@@ -1476,9 +1462,7 @@ define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i1
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
@@ -1495,9 +1479,7 @@ define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i1
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
@@ -1514,9 +1496,7 @@ define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i1
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
@@ -1533,9 +1513,7 @@ define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
@@ -1552,9 +1530,7 @@ define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i3
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
-; CHECK-NEXT:    vmv1r.v v25, v8
-; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    vwsubu.wv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
@@ -1571,9 +1547,7 @@ define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i3
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
-; CHECK-NEXT:    vmv2r.v v26, v8
-; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    vwsubu.wv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
@@ -1590,9 +1564,7 @@ define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i3
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
-; CHECK-NEXT:    vmv4r.v v28, v8
-; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
-; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    vwsubu.wv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
@@ -1609,9 +1581,7 @@ define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i3
 ; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
-; CHECK-NEXT:    vmv8r.v v24, v8
-; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
-; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    vwsubu.wv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(

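All of the _tie_ tests updated above follow the same pattern: the merge operand and the first (wide) source of the masked intrinsic are the same value, which is exactly the case the new TIED pseudo covers, so the vmv copies around the instruction go away. As a hedged illustration only (the intrinsic argument lists are elided in the diff context above, so the exact signature here is an assumption based on the masked RVV intrinsics of this era: merge, op1, op2, mask, vl, with an i32 vl on rv32; the function name is hypothetical), a minimal standalone reproducer might look like:

declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i8>,
  <vscale x 1 x i1>, i32)

; %wide is passed as both the merge operand and the first source, so with
; this change the expected codegen is a single instruction with no copies,
; mirroring the CHECK lines above:
;   vwaddu.wv v8, v8, v9, v0.t
define <vscale x 1 x i16> @wadd_wv_tie(<vscale x 1 x i16> %wide,
                                       <vscale x 1 x i8> %narrow,
                                       <vscale x 1 x i1> %mask, i32 %vl) {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
      <vscale x 1 x i16> %wide,
      <vscale x 1 x i16> %wide,
      <vscale x 1 x i8> %narrow,
      <vscale x 1 x i1> %mask,
      i32 %vl)
  ret <vscale x 1 x i16> %a
}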