[llvm] a413099 - [RISCV] Pre-commit test cases for D103211. NFC

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Fri May 28 13:24:11 PDT 2021


Author: Craig Topper
Date: 2021-05-28T13:21:58-07:00
New Revision: a41309966a862be3817d05e4ae9ece60a7d8de86

URL: https://github.com/llvm/llvm-project/commit/a41309966a862be3817d05e4ae9ece60a7d8de86
DIFF: https://github.com/llvm/llvm-project/commit/a41309966a862be3817d05e4ae9ece60a7d8de86.diff

LOG: [RISCV] Pre-commit test cases for D103211. NFC
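
These tests exercise the masked widening .wv/.wf intrinsics with the
destination tied to the wide source operand: the same value is passed as
both the merge operand and the first (wide) source. A minimal sketch of
the pattern, mirroring the nxv1f32 case added below (the function name
here is illustrative, not part of the commit):

  declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
    <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x half>,
    <vscale x 1 x i1>, i32)

  define <vscale x 1 x float> @tie_sketch(<vscale x 1 x float> %w, <vscale x 1 x half> %v, <vscale x 1 x i1> %m, i32 %vl) nounwind {
  entry:
    ; %w is both the merge operand (the result where the mask is off) and
    ; the wide addend, so the destination register is tied to a source.
    %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
      <vscale x 1 x float> %w,
      <vscale x 1 x float> %w,
      <vscale x 1 x half> %v,
      <vscale x 1 x i1> %m,
      i32 %vl)
    ret <vscale x 1 x float> %a
  }

As the CHECK lines below show, the .wv form currently goes through a
temporary (vmv1r.v v25, v8 ... vmv1r.v v8, v25) rather than writing v8 in
place, while the .wf form already updates v8 directly.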

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
index a0f53677569e..d9967d723eaa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
@@ -821,3 +821,336 @@ entry:
 
   ret <vscale x 8 x double> %a
 }
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vfwadd.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vfwadd.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vfwadd.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vfwadd.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vfwadd.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vfwadd.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %0,
+    half %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %0,
+    half %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %0,
+    half %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %0,
+    half %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %0,
+    half %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
index a291f8e4a890..6f56e8752405 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll
@@ -821,3 +821,336 @@ entry:
 
   ret <vscale x 8 x double> %a
 }
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vfwadd.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4f16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vfwadd.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8f16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vfwadd.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16f16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vfwadd.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vfwadd.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vfwadd.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %0,
+    half %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %0,
+    half %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %0,
+    half %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %0,
+    half %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %0,
+    half %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwadd.w.mask.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwadd.w.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwadd.w.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfwadd.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwadd.w.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
index 37f4de9281bc..7683a9ddc261 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll
@@ -821,3 +821,336 @@ entry:
 
   ret <vscale x 8 x double> %a
 }
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vfwsub.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vfwsub.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vfwsub.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vfwsub.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vfwsub.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vfwsub.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %0,
+    half %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %0,
+    half %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %0,
+    half %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %0,
+    half %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %0,
+    half %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
index bc421129ca85..2c0b03998a5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll
@@ -821,3 +821,336 @@ entry:
 
   ret <vscale x 8 x double> %a
 }
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vfwsub.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4f16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vfwsub.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8f16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vfwsub.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16f16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f64_nxv1f64_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vfwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.nxv1f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f64_nxv2f64_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vfwsub.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.nxv2f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv4f64_nxv4f64_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vfwsub.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.nxv4f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f64_nxv8f64_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vfwsub.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.nxv8f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x double> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.mask.nxv1f32.f16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %0,
+    half %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwsub.w.mask.nxv2f32.f16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %0,
+    half %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwsub.w.mask.nxv4f32.f16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %0,
+    half %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwsub.w.mask.nxv8f32.f16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %0,
+    half %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.h.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwsub.w.mask.nxv16f32.f16(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %0,
+    half %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f64_nxv1f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %0,
+    float %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f64_nxv2f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vfwsub.w.mask.nxv2f64.f32(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %0,
+    float %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f64_nxv4f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vfwsub.w.mask.nxv4f64.f32(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %0,
+    float %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f64_nxv8f64_f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vfwsub.wf v8, v8, ft0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vfwsub.w.mask.nxv8f64.f32(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %0,
+    float %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x double> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
index d9d9ea21b720..0af7d4cd9df4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv32.ll
@@ -1338,3 +1338,543 @@ entry:
 
   ret <vscale x 8 x i64> %a
 }
+
+define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
index 7bbc078a106d..52df48552124 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd.w-rv64.ll
@@ -1338,3 +1338,543 @@ entry:
 
   ret <vscale x 8 x i64> %a
 }
+
+define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwadd.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwadd.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwadd.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwadd.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
index 186cce0f653c..5939659eec4a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv32.ll
@@ -1338,3 +1338,543 @@ entry:
 
   ret <vscale x 8 x i64> %a
 }
+
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
index 84606e1bcbed..3a78f20e735a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwaddu.w-rv64.ll
@@ -1338,3 +1338,543 @@ entry:
 
   ret <vscale x 8 x i64> %a
 }
+
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwaddu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwaddu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwaddu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwaddu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vwaddu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
index 33481ea3bfb3..4f999e8a4b73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv32.ll
@@ -1338,3 +1338,543 @@ entry:
 
   ret <vscale x 8 x i64> %a
 }
+
+define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
index 713c29c328c4..9005395694e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsub.w-rv64.ll
@@ -1338,3 +1338,543 @@ entry:
 
   ret <vscale x 8 x i64> %a
 }
+
+define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsub.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsub.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsub.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsub.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsub.w.mask.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsub.w.mask.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsub.w.mask.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vwsub.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsub.w.mask.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
index 7bf73f4f9362..49bebf1b294c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv32.ll
@@ -1338,3 +1338,543 @@ entry:
 
   ret <vscale x 8 x i64> %a
 }
+
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i64> %a
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
index dde0b1cbf343..9f00028f0a7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsubu.w-rv64.ll
@@ -1338,3 +1338,543 @@ entry:
 
   ret <vscale x 8 x i64> %a
 }
+
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v25, v8
+; CHECK-NEXT:    vwsubu.wv v25, v8, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v8, v25
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.nxv1i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv2r.v v26, v8
+; CHECK-NEXT:    vwsubu.wv v26, v8, v10, v0.t
+; CHECK-NEXT:    vmv2r.v v8, v26
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.nxv2i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv4r.v v28, v8
+; CHECK-NEXT:    vwsubu.wv v28, v8, v12, v0.t
+; CHECK-NEXT:    vmv4r.v v8, v28
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.nxv4i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv8r.v v24, v8
+; CHECK-NEXT:    vwsubu.wv v24, v8, v16, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.nxv8i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf8,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf4,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,mf2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m1,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e8,m4,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf4,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,mf2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m1,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e16,m4,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,mf2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vwsubu.w.mask.nxv1i64.i32(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m1,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vwsubu.w.mask.nxv2i64.i32(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m2,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vwsubu.w.mask.nxv4i64.i32(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a1, e32,m4,tu,mu
+; CHECK-NEXT:    vwsubu.wx v8, v8, a0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vwsubu.w.mask.nxv8i64.i32(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
