[llvm] 5c14267 - [Clang][RISCV] Add Zvabd intrinsics

via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 13 22:00:26 PST 2026


Author: Pengcheng Wang
Date: 2026-02-14T14:00:17+08:00
New Revision: 5c14267374799ca2ecb4a4965eed820ac72af786

URL: https://github.com/llvm/llvm-project/commit/5c14267374799ca2ecb4a4965eed820ac72af786
DIFF: https://github.com/llvm/llvm-project/commit/5c14267374799ca2ecb4a4965eed820ac72af786.diff

LOG: [Clang][RISCV] Add Zvabd intrinsics

Doc:

* https://github.com/riscv/integer-vector-absolute-difference
* https://github.com/riscv-non-isa/rvv-intrinsic-doc/pull/424

Authored-by: Zhenxuan Sang <sang at bytedance.com>
Co-Authored-by: Pengcheng Wang <wangpengcheng.pp at bytedance.com>

Reviewers: preames, topperc, kito-cheng

Pull Request: https://github.com/llvm/llvm-project/pull/180929
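
Not part of the commit, but a minimal usage sketch of the new signed
absolute-difference intrinsic, assuming a compiler that carries this
patch (Zvabd is experimental, so it has to be enabled explicitly, e.g.
with zvabd in -march plus -menable-experimental-extensions). The
function sad_i8 and its parameters are hypothetical; __riscv_vabd_vv_u8m1
matches the autogenerated tests below, and the rest is standard RVV:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

uint64_t sad_i8(const int8_t *a, const int8_t *b, size_t n) {
  uint64_t sum = 0;
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);
    vint8m1_t va = __riscv_vle8_v_i8m1(a + i, vl);
    vint8m1_t vb = __riscv_vle8_v_i8m1(b + i, vl);
    // New in this patch: |va - vb| as an unsigned vector. One instruction,
    // and no INT8_MIN negation hazard as with abs(sub(va, vb)).
    vuint8m1_t vd = __riscv_vabd_vv_u8m1(va, vb, vl);
    // Existing RVV: zero-extend to 32 bits and reduce into a scalar.
    vuint32m4_t vw = __riscv_vzext_vf4_u32m4(vd, vl);
    vuint32m1_t zero = __riscv_vmv_v_x_u32m1(0, vl);
    vuint32m1_t red = __riscv_vredsum_vs_u32m4_u32m1(vw, zero, vl);
    sum += __riscv_vmv_x_s_u32m1_u32(red);
    i += vl;
  }
  return sum;
}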

Added: 
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabda.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdau.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabda.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdau.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabda.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdau.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabda.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdau.c
    llvm/test/CodeGen/RISCV/rvv/vabd.ll
    llvm/test/CodeGen/RISCV/rvv/vabdu.ll
    llvm/test/CodeGen/RISCV/rvv/vabs.ll
    llvm/test/CodeGen/RISCV/rvv/vwabda.ll
    llvm/test/CodeGen/RISCV/rvv/vwabdau.ll

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index c899dc70fc0b7..0985ea8a4fdec 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2043,6 +2043,22 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
     defm vclmul  : RVVInt64BinBuiltinSet;
     defm vclmulh : RVVInt64BinBuiltinSet;
   }
+
+  // zvabd
+  let RequiredFeatures = ["zvabd"] in {
+    defm vabs : RVVOutBuiltinSet<"vabs", "csil", [["v", "Uv", "Uvv"]]>;
+    defm vabd : RVVOutBuiltinSet<"vabd", "cs", [["vv", "Uv", "Uvvv"]]>;
+    defm vabdu : RVVOutBuiltinSet<"vabdu", "cs", [["vv", "Uv", "UvUvUv"]]>;
+  }
+}
+
+let UnMaskedPolicyScheme = HasPolicyOperand, HasMaskedOffOperand = false in {
+  let RequiredFeatures = ["zvabd"] in {
+    defm vwabda : RVVOutOp1Op2BuiltinSet<"vwabda", "cs",
+                                         [["vv", "Uw", "UwUwvv"]]>;
+    defm vwabdau : RVVOutOp1Op2BuiltinSet<"vwabdau", "cs",
+                                          [["vv", "Uw", "UwUwUvUv"]]>;
+  }
 }
 
 let UnMaskedPolicyScheme = HasPolicyOperand, HasMasked = false in {
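
To decode the prototype strings above: in riscv_vector.td's type-transformer
notation, "v" is a vector of the base type, a leading "U" makes it unsigned,
and "w" widens to 2*SEW (with 2*LMUL). So "Uvv" (vabs) and "Uvvv" (vabd) take
signed sources and return an unsigned result, "UvUvUv" (vabdu) is unsigned
throughout, and the "UwUw..." forms are widening accumulates whose unsigned
double-width destination doubles as the accumulator. A sketch of the
resulting C signatures at SEW=8/LMUL=1 follows; the first three match the
autogenerated tests below, while the vwabda/vwabdau names are inferred from
the prototypes and not confirmed by this excerpt:

#include <riscv_vector.h>

vuint8m1_t  __riscv_vabs_v_u8m1(vint8m1_t vs2, size_t vl);     // |vs2|
vuint8m1_t  __riscv_vabd_vv_u8m1(vint8m1_t vs2, vint8m1_t vs1,
                                 size_t vl);                   // |vs2 - vs1|, signed inputs
vuint8m1_t  __riscv_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1,
                                  size_t vl);                  // |vs2 - vs1|, unsigned inputs
// Inferred: vd[i] += |vs2[i] - vs1[i]|, widened to 16 bits.
vuint16m2_t __riscv_vwabda_vv_u16m2(vuint16m2_t vd, vint8m1_t vs2,
                                    vint8m1_t vs1, size_t vl);
vuint16m2_t __riscv_vwabdau_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs2,
                                     vuint8m1_t vs1, size_t vl);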

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd.c
new file mode 100644
index 0000000000000..abd605d9c4463
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd.c
@@ -0,0 +1,280 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd_vv_u8mf8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd_vv_u8mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd_vv_u8mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
+                               size_t vl) {
+  return __riscv_vabd_vv_u8m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
+                               size_t vl) {
+  return __riscv_vabd_vv_u8m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
+                               size_t vl) {
+  return __riscv_vabd_vv_u8m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1,
+                               size_t vl) {
+  return __riscv_vabd_vv_u8m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4_m(vbool64_t vm, vint16mf4_t vs2,
+                                   vint16mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2_m(vbool32_t vm, vint16mf2_t vs2,
+                                   vint16mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd_vv_u16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd_vv_u16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd_vv_u16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd_vv_u16m8_m(vm, vs2, vs1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu.c
new file mode 100644
index 0000000000000..44983b000b18d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu.c
@@ -0,0 +1,282 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                  size_t vl) {
+  return __riscv_vabdu_vv_u16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                  size_t vl) {
+  return __riscv_vabdu_vv_u16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
+                                  vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
+                                  vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
+                                  vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
+                                size_t vl) {
+  return __riscv_vabdu_vv_u8m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
+                                size_t vl) {
+  return __riscv_vabdu_vv_u8m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
+                                size_t vl) {
+  return __riscv_vabdu_vv_u8m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
+                                size_t vl) {
+  return __riscv_vabdu_vv_u8m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
+                                    vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
+                                    vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
+                                  vuint16m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vabdu_vv_u16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vabdu_vv_u16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vabdu_vv_u16m8_m(vm, vs2, vs1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs.c
new file mode 100644
index 0000000000000..7badcf9f3b9b3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs.c
@@ -0,0 +1,447 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8(vint8mf8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4(vint8mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2(vint8mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1(vint8m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2(vint8m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4(vint8m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8(vint8m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4(vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2(vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1(vint16m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2(vint16m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4(vint16m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8(vint16m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1(vint32m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2(vint32m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4(vint32m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8(vint32m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1(vint64m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2(vint64m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4(vint64m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8(vint64m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8mf8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1_m(vbool8_t vm, vint8m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2_m(vbool4_t vm, vint8m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4_m(vbool2_t vm, vint8m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8_m(vbool1_t vm, vint8m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16mf4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1_m(vbool16_t vm, vint16m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2_m(vbool8_t vm, vint16m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4_m(vbool4_t vm, vint16m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8_m(vbool2_t vm, vint16m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32mf2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1_m(vbool32_t vm, vint32m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2_m(vbool16_t vm, vint32m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4_m(vbool8_t vm, vint32m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8_m(vbool4_t vm, vint32m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m8_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1_m(vbool64_t vm, vint64m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m1_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2_m(vbool32_t vm, vint64m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m2_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4_m(vbool16_t vm, vint64m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m4_m(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8_m(vbool8_t vm, vint64m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m8_m(vm, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabda.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabda.c
new file mode 100644
index 0000000000000..0941b364b486e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabda.c
@@ -0,0 +1,253 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4(vuint16mf4_t vd, vint8mf8_t vs1,
+                                   vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2(vuint16mf2_t vd, vint8mf4_t vs1,
+                                   vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1(vuint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda_vv_u16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2(vuint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda_vv_u16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4(vuint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda_vv_u16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8(vuint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda_vv_u16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2(vuint32mf2_t vd, vint16mf4_t vs1,
+                                   vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1(vuint32m1_t vd, vint16mf2_t vs1,
+                                 vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2(vuint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda_vv_u32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4(vuint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda_vv_u32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8(vuint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda_vv_u32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd,
+                                     vint8mf8_t vs1, vint8mf8_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_vv_u16mf4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd,
+                                     vint8mf4_t vs1, vint8mf4_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_vv_u16mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vint8mf2_t vs1,
+                                   vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vint8m1_t vs1,
+                                   vint8m1_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vint8m2_t vs1,
+                                   vint8m2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vint8m4_t vs1,
+                                   vint8m4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd,
+                                     vint16mf4_t vs1, vint16mf4_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_vv_u32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd,
+                                   vint16mf2_t vs1, vint16mf2_t vs2,
+                                   size_t vl) {
+  return __riscv_vwabda_vv_u32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vint16m1_t vs1,
+                                   vint16m1_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, vint16m2_t vs1,
+                                   vint16m2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vint16m4_t vs1,
+                                   vint16m4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m8_m(vm, vd, vs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdau.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdau.c
new file mode 100644
index 0000000000000..64ba4e9f2651c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdau.c
@@ -0,0 +1,257 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs1,
+                                    vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs1,
+                                    vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs1,
+                                  vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs1,
+                                  vuint8m1_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs1,
+                                  vuint8m2_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs1,
+                                  vuint8m4_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs1,
+                                    vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs1,
+                                  vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs1,
+                                  vuint16m1_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs1,
+                                  vuint16m2_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u32m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs1,
+                                  vuint16m4_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u32m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint8mf8_t vs1, vuint8mf8_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u16mf4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint8mf4_t vs1, vuint8mf4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u16mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd,
+                                    vuint8mf2_t vs1, vuint8mf2_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabdau_vv_u16m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1,
+                                    vuint8m1_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1,
+                                    vuint8m2_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1,
+                                    vuint8m4_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m8_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint16mf4_t vs1, vuint16mf4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd,
+                                    vuint16mf2_t vs1, vuint16mf2_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabdau_vv_u32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd,
+                                    vuint16m1_t vs1, vuint16m1_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabdau_vv_u32m2_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd,
+                                    vuint16m2_t vs1, vuint16m2_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabdau_vv_u32m4_m(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd,
+                                    vuint16m4_t vs1, vuint16m4_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabdau_vv_u32m8_m(vm, vd, vs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd.c
new file mode 100644
index 0000000000000..55293b923544c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd.c
@@ -0,0 +1,280 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
+                               size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
+                               size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
+                               size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1,
+                               size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4_m(vbool64_t vm, vint16mf4_t vs2,
+                                   vint16mf4_t vs1, size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2_m(vbool32_t vm, vint16mf2_t vs2,
+                                   vint16mf2_t vs1, size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vabd(vm, vs2, vs1, vl);
+}
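
For reference, a minimal usage sketch of the overloaded __riscv_vabd form
exercised above: a strip-mined loop computing element-wise |a - b| over two
signed byte buffers. The function and buffer names are illustrative, not part
of the patch, and building it assumes a toolchain carrying this change with
the experimental Zvabd extension enabled (e.g. via
-menable-experimental-extensions).

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

// Illustrative only: vabd takes signed sources and, as the tests above
// show, produces an unsigned result vector of the same SEW/LMUL.
void abs_diff_i8(const int8_t *a, const int8_t *b, uint8_t *out, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e8m1(n);
    vint8m1_t va = __riscv_vle8_v_i8m1(a, vl);
    vint8m1_t vb = __riscv_vle8_v_i8m1(b, vl);
    vuint8m1_t vd = __riscv_vabd(va, vb, vl);
    __riscv_vse8_v_u8m1(out, vd, vl);
    a += vl; b += vl; out += vl; n -= vl;
  }
}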

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu.c
new file mode 100644
index 0000000000000..71e7292ee38f0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu.c
@@ -0,0 +1,282 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                  size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                  size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
+                                  vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
+                                  vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
+                                  vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
+                                size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
+                                size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
+                                size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
+                                size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
+                                    vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
+                                    vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
+                                  vuint16m1_t vs1, size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vabdu(vm, vs2, vs1, vl);
+}
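
Likewise, a hedged sketch of the masked _m overload checked above: the
unsigned absolute difference is computed only in lanes selected by the mask,
and inactive lanes are left agnostic, matching the poison passthru and the
trailing policy operand (i64 3) in the IR. The threshold and function name
below are invented for illustration.

#include <riscv_vector.h>

// Compute |vs2 - vs1| only where vs2 >= 16; the remaining lanes are
// agnostic, as in the poison-passthru, policy-3 calls above.
vuint8m1_t abd_above_threshold(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
  vbool8_t vm = __riscv_vmsgeu_vx_u8m1_b8(vs2, 16, vl);
  return __riscv_vabdu(vm, vs2, vs1, vl);
}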

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs.c
new file mode 100644
index 0000000000000..c08cbeef29e15
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs.c
@@ -0,0 +1,447 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8(vint8mf8_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4(vint8mf4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2(vint8mf2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1(vint8m1_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2(vint8m2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4(vint8m4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8(vint8m8_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4(vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2(vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1(vint16m1_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2(vint16m2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4(vint16m4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8(vint16m8_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1(vint32m1_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2(vint32m2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4(vint32m4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8(vint32m8_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1(vint64m1_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2(vint64m2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4(vint64m4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8(vint64m8_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8_m(vbool64_t vm, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4_m(vbool32_t vm, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2_m(vbool16_t vm, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1_m(vbool8_t vm, vint8m1_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2_m(vbool4_t vm, vint8m2_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4_m(vbool2_t vm, vint8m4_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8_m(vbool1_t vm, vint8m8_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4_m(vbool64_t vm, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2_m(vbool32_t vm, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1_m(vbool16_t vm, vint16m1_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2_m(vbool8_t vm, vint16m2_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4_m(vbool4_t vm, vint16m4_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8_m(vbool2_t vm, vint16m8_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2_m(vbool64_t vm, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1_m(vbool32_t vm, vint32m1_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2_m(vbool16_t vm, vint32m2_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4_m(vbool8_t vm, vint32m4_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8_m(vbool4_t vm, vint32m8_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1_m(vbool64_t vm, vint64m1_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2_m(vbool32_t vm, vint64m2_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4_m(vbool16_t vm, vint64m4_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8_m(vbool8_t vm, vint64m8_t vs2, size_t vl) {
+  return __riscv_vabs(vm, vs2, vl);
+}
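
For vabs, a one-line sketch of the overloaded form tested above. The unsigned
return type suggests that even INT32_MIN has a representable absolute value
(2^31); that is an inference from the signatures here, not something stated
in the patch.

#include <riscv_vector.h>

// |vs2| as an unsigned vector; an illustrative wrapper, not part of the patch.
vuint32m1_t abs_i32(vint32m1_t vs2, size_t vl) {
  return __riscv_vabs(vs2, vl);
}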

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabda.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabda.c
new file mode 100644
index 0000000000000..3c6d612b5efc1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabda.c
@@ -0,0 +1,253 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4(vuint16mf4_t vd, vint8mf8_t vs1,
+                                   vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2(vuint16mf2_t vd, vint8mf4_t vs1,
+                                   vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1(vuint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2(vuint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4(vuint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8(vuint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2(vuint32mf2_t vd, vint16mf4_t vs1,
+                                   vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1(vuint32m1_t vd, vint16mf2_t vs1,
+                                 vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2(vuint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4(vuint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8(vuint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vwabda(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd,
+                                     vint8mf8_t vs1, vint8mf8_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd,
+                                     vint8mf4_t vs1, vint8mf4_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd, vint8mf2_t vs1,
+                                   vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vint8m1_t vs1,
+                                   vint8m1_t vs2, size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vint8m2_t vs1,
+                                   vint8m2_t vs2, size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vint8m4_t vs1,
+                                   vint8m4_t vs2, size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd,
+                                     vint16mf4_t vs1, vint16mf4_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd,
+                                   vint16mf2_t vs1, vint16mf2_t vs2,
+                                   size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd, vint16m1_t vs1,
+                                   vint16m1_t vs2, size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd, vint16m2_t vs1,
+                                   vint16m2_t vs2, size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd, vint16m4_t vs1,
+                                   vint16m4_t vs2, size_t vl) {
+  return __riscv_vwabda(vm, vd, vs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdau.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdau.c
new file mode 100644
index 0000000000000..23462053017fe
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdau.c
@@ -0,0 +1,257 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs1,
+                                    vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs1,
+                                    vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs1,
+                                  vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs1,
+                                  vuint8m1_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs1,
+                                  vuint8m2_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs1,
+                                  vuint8m4_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs1,
+                                    vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs1,
+                                  vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs1,
+                                  vuint16m1_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs1,
+                                  vuint16m2_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs1,
+                                  vuint16m4_t vs2, size_t vl) {
+  return __riscv_vwabdau(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint8mf8_t vs1, vuint8mf8_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint8mf4_t vs1, vuint8mf4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1_m(vbool16_t vm, vuint16m1_t vd,
+                                    vuint8mf2_t vs1, vuint8mf2_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2_m(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1,
+                                    vuint8m1_t vs2, size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4_m(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1,
+                                    vuint8m2_t vs2, size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8_m(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1,
+                                    vuint8m4_t vs2, size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint16mf4_t vs1, vuint16mf4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1_m(vbool32_t vm, vuint32m1_t vd,
+                                    vuint16mf2_t vs1, vuint16mf2_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2_m(vbool16_t vm, vuint32m2_t vd,
+                                    vuint16m1_t vs1, vuint16m1_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4_m(vbool8_t vm, vuint32m4_t vd,
+                                    vuint16m2_t vs1, vuint16m2_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8_m(vbool4_t vm, vuint32m8_t vd,
+                                    vuint16m4_t vs1, vuint16m4_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabdau(vm, vd, vs1, vs2, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd.c
new file mode 100644
index 0000000000000..d245b025effd9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd.c
@@ -0,0 +1,585 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2,
+                                  vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2,
+                                  vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2,
+                                  vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1,
+                                size_t vl) {
+  return __riscv_vabd_vv_u8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1,
+                                size_t vl) {
+  return __riscv_vabd_vv_u8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1,
+                                size_t vl) {
+  return __riscv_vabd_vv_u8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1,
+                                size_t vl) {
+  return __riscv_vabd_vv_u8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2,
+                                    vint16mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2,
+                                    vint16mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2,
+                                  vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2,
+                                  vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2,
+                                  vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8_tu(vuint16m8_t vd, vint16m8_t vs2,
+                                  vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vint8mf2_t vs2,
+                                   vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                                 vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                                 vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                                 vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                                 vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                     vint16mf4_t vs2, vint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vabd_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                     vint16mf2_t vs2, vint16mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vabd_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                   vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                    vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                    vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                    vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                                  vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                                  vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                                  vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                                  vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                      vint16mf4_t vs2, vint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vabd_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                      vint16mf2_t vs2, vint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vabd_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                    vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                    vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                    vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                    vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vint8mf8_t vs2,
+                                  vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vint8mf4_t vs2,
+                                  vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vint8mf2_t vs2,
+                                  vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                                vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                                vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                                vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                                vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                    vint16mf4_t vs2, vint16mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vabd_vv_u16mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                    vint16mf2_t vs2, vint16mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vabd_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vint16m1_t vs2,
+                                  vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                  vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                  vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                  vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
+}

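For reference, the policy immediates in the checks above follow the usual RVV
policy encoding: the _tumu intrinsics lower with a policy operand of 0 (tail
undisturbed, mask undisturbed), _mu with 1 (tail agnostic, mask undisturbed),
and _tum with 2 (tail undisturbed, mask agnostic). A minimal usage sketch,
using only intrinsic names that appear in the tests above (the wrapper name
is illustrative):

#include <riscv_vector.h>

// Masked absolute difference of signed bytes: inactive elements keep their
// value from vd (mask undisturbed) and the tail is left agnostic.
vuint8m1_t abs_diff_mu(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
                       vint8m1_t vs1, size_t vl) {
  return __riscv_vabd_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
}
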
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu.c
new file mode 100644
index 0000000000000..e3eb2f67ae0d4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu.c
@@ -0,0 +1,603 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2,
+                                   vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2,
+                                   vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2,
+                                   vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vabdu_vv_u8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vabdu_vv_u8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vabdu_vv_u8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vabdu_vv_u8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                     vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                     vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
+                                   vuint16m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2,
+                                   vuint16m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2,
+                                   vuint16m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2,
+                                   vuint16m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                    vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                  vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                  vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                  vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                  vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vabdu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vabdu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m1_t vs2, vuint16m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                    vuint16m2_t vs2, vuint16m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                    vuint16m4_t vs2, vuint16m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                    vuint16m8_t vs2, vuint16m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                   vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                   vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                   vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                   vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vabdu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vabdu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m1_t vs2, vuint16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m2_t vs2, vuint16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m4_t vs2, vuint16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                     vuint16m8_t vs2, vuint16m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                   vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                   size_t vl) {
+  return __riscv_vabdu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                   vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                   size_t vl) {
+  return __riscv_vabdu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+                                   vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vabdu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                 vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                 vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                 vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                 vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                     vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                     vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                   vuint16m1_t vs2, vuint16m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vabdu_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2,
+                                   vuint16m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2,
+                                   vuint16m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2,
+                                   vuint16m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
+}

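One pattern worth noting in these tests: vabd treats its sources as signed
(vint types) yet produces an unsigned result, since an absolute difference is
always non-negative, while vabdu takes unsigned sources throughout; the unary
vabs below follows the same signed-source, unsigned-result convention. A
hedged sketch contrasting the two binary forms, using only intrinsic names
that appear above (the wrapper names are illustrative):

#include <riscv_vector.h>

// Signed semantics: |vs2 - vs1| with the sources interpreted as signed bytes.
vuint8m1_t sad_signed(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
                      vint8m1_t vs1, size_t vl) {
  return __riscv_vabd_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
}

// Unsigned semantics: |vs2 - vs1| with the sources interpreted as unsigned
// bytes; the tail-undisturbed (_tu) variant keeps tail elements from vd.
vuint8m1_t sad_unsigned(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
                        size_t vl) {
  return __riscv_vabdu_vv_u8m1_tu(vd, vs2, vs1, vl);
}
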
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs.c
new file mode 100644
index 0000000000000..bf7ea7a761bb8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs.c
@@ -0,0 +1,956 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u8m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8_tu(vuint16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2_tu(vuint32mf2_t vd, vint32mf2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u32mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1_tu(vuint32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2_tu(vuint32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4_tu(vuint32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8_tu(vuint32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1_tu(vuint64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2_tu(vuint64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4_tu(vuint64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8_tu(vuint64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_u64m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vint8mf8_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u8mf8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vint8mf4_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u8mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vint8mf2_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u8mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                                size_t vl) {
+  return __riscv_vabs_v_u8m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                                size_t vl) {
+  return __riscv_vabs_v_u8m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                                size_t vl) {
+  return __riscv_vabs_v_u8m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                                size_t vl) {
+  return __riscv_vabs_v_u8m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                    vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16mf4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                    vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vint16m1_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u16m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u16m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u16m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u16m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                    vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32mf2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vint32m1_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u32m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vint32m2_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u32m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vint32m4_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u32m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vint32m8_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u32m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vint64m1_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u64m1_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vint64m2_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u64m2_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vint64m4_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u64m4_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vint64m8_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_v_u64m8_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vint8mf8_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u8mf8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vint8mf4_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u8mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vint8mf2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u8mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u8m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u8m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u8m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u8m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                     vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16mf4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                     vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vint16m1_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u16m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u16m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u16m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u16m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                     vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32mf2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vint32m1_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u32m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vint32m2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u32m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vint32m4_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u32m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vint32m8_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u32m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vint64m1_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u64m1_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vint64m2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u64m2_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vint64m4_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u64m4_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vint64m8_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_v_u64m8_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vint8mf8_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u8mf8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vint8mf4_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u8mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vint8mf2_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                               size_t vl) {
+  return __riscv_vabs_v_u8m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                               size_t vl) {
+  return __riscv_vabs_v_u8m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                               size_t vl) {
+  return __riscv_vabs_v_u8m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                               size_t vl) {
+  return __riscv_vabs_v_u8m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                   vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16mf4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                   vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u16mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vint16m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u16m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                   vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_u32mf2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vint32m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u32m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vint32m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u32m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vint32m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u32m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vint32m8_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u32m8_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vint64m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u64m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vint64m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u64m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vint64m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u64m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vint64m8_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_v_u64m8_mu(vm, vd, vs2, vl);
+}

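For cross-checking the autogenerated assertions: the trailing `i64` immediate on each intrinsic call encodes the RVV policy bits (bit 0 = tail agnostic, bit 1 = mask agnostic). Hence the `_tu` and `_tum` variants lower with `i64 2` (tail undisturbed, mask agnostic), `_tumu` with `i64 0` (both undisturbed), and `_mu` with `i64 1` (tail agnostic, mask undisturbed), matching the checks above and in the files that follow. A minimal sketch of the four policy variants of one intrinsic, using the u16m1 signatures tested below (illustrative only, not part of the patch):

    #include <riscv_vector.h>

    // Each suffix selects one trailing policy immediate in the generated IR:
    //   _tu   -> i64 2 (tail undisturbed; mask bit irrelevant when unmasked)
    //   _tum  -> i64 2 (tail undisturbed, mask agnostic)
    //   _tumu -> i64 0 (tail undisturbed, mask undisturbed)
    //   _mu   -> i64 1 (tail agnostic,    mask undisturbed)
    vuint16m1_t policy_variants(vbool16_t vm, vuint16m1_t vd, vint8mf2_t vs1,
                                vint8mf2_t vs2, size_t vl) {
      vd = __riscv_vwabda_vv_u16m1_tu(vd, vs1, vs2, vl);
      vd = __riscv_vwabda_vv_u16m1_tum(vm, vd, vs1, vs2, vl);
      vd = __riscv_vwabda_vv_u16m1_tumu(vm, vd, vs1, vs2, vl);
      vd = __riscv_vwabda_vv_u16m1_mu(vm, vd, vs1, vs2, vl);
      return vd;
    }
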
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabda.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabda.c
new file mode 100644
index 0000000000000..8c883fde1c80f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabda.c
@@ -0,0 +1,511 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4_tu(vuint16mf4_t vd, vint8mf8_t vs1,
+                                      vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2_tu(vuint16mf2_t vd, vint8mf4_t vs1,
+                                      vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1_tu(vuint16m1_t vd, vint8mf2_t vs1,
+                                    vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2_tu(vuint16m2_t vd, vint8m1_t vs1,
+                                    vint8m1_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4_tu(vuint16m4_t vd, vint8m2_t vs1,
+                                    vint8m2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8_tu(vuint16m8_t vd, vint8m4_t vs1,
+                                    vint8m4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2_tu(vuint32mf2_t vd, vint16mf4_t vs1,
+                                      vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1_tu(vuint32m1_t vd, vint16mf2_t vs1,
+                                    vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2_tu(vuint32m2_t vd, vint16m1_t vs1,
+                                    vint16m1_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4_tu(vuint32m4_t vd, vint16m2_t vs1,
+                                    vint16m2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8_tu(vuint32m8_t vd, vint16m4_t vs1,
+                                    vint16m4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vint8mf8_t vs1, vint8mf8_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabda_vv_u16mf4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vint8mf4_t vs1, vint8mf4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabda_vv_u16mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vint8mf2_t vs1, vint8mf2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_vv_u16m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vint8m1_t vs1,
+                                     vint8m1_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vint8m2_t vs1,
+                                     vint8m2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vint8m4_t vs1,
+                                     vint8m4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vint16mf4_t vs1, vint16mf4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabda_vv_u32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vint16mf2_t vs1, vint16mf2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_vv_u32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vint16m1_t vs1, vint16m1_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_vv_u32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vint16m2_t vs1, vint16m2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_vv_u32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                     vint16m4_t vs1, vint16m4_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_vv_u32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vint8mf8_t vs1, vint8mf8_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabda_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vint8mf4_t vs1, vint8mf4_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabda_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vint8mf2_t vs1, vint8mf2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_vv_u16m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                      vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vint16mf4_t vs1, vint16mf4_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabda_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vint16mf2_t vs1, vint16mf2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_vv_u32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vint16m1_t vs1, vint16m1_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_vv_u32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vint16m2_t vs1, vint16m2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_vv_u32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                      vint16m4_t vs1, vint16m4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_vv_u32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vint8mf8_t vs1, vint8mf8_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_vv_u16mf4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vint8mf4_t vs1, vint8mf4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_vv_u16mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vint8m1_t vs1,
+                                    vint8m1_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vint8m2_t vs1,
+                                    vint8m2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vint8m4_t vs1,
+                                    vint8m4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u16m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vint16mf4_t vs1, vint16mf4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_vv_u32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vint16mf2_t vs1, vint16mf2_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabda_vv_u32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vint16m2_t vs1,
+                                    vint16m2_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vint16m4_t vs1,
+                                    vint16m4_t vs2, size_t vl) {
+  return __riscv_vwabda_vv_u32m8_mu(vm, vd, vs1, vs2, vl);
+}

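As a usage illustration of the accumulate form exercised above: `vwabda` adds the widened absolute difference of two signed byte vectors into an unsigned 16-bit accumulator, and the `_tu` variant keeps accumulator lanes at or beyond `vl` undisturbed, which is what a strip-mined reduction wants on its short final iteration. A hypothetical sum-of-absolute-differences kernel built on the u16m1 variant tested above (a sketch, not part of the patch; the helper name and the reduction tail are assumptions):

    #include <riscv_vector.h>
    #include <stddef.h>
    #include <stdint.h>

    // Hypothetical SAD kernel: accumulates |a[i] - b[i]| into 16-bit lanes,
    // then reduces the lanes to a scalar. May wrap for large n.
    uint16_t sad_i8(const int8_t *a, const int8_t *b, size_t n) {
      size_t vlmax = __riscv_vsetvlmax_e16m1();
      vuint16m1_t acc = __riscv_vmv_v_x_u16m1(0, vlmax);
      for (size_t vl; n > 0; n -= vl, a += vl, b += vl) {
        vl = __riscv_vsetvl_e8mf2(n);                // e8mf2 pairs with u16m1
        vint8mf2_t va = __riscv_vle8_v_i8mf2(a, vl);
        vint8mf2_t vb = __riscv_vle8_v_i8mf2(b, vl);
        acc = __riscv_vwabda_vv_u16m1_tu(acc, va, vb, vl); // acc += |va - vb|
      }
      vuint16m1_t zero = __riscv_vmv_v_x_u16m1(0, vlmax);
      vuint16m1_t sum = __riscv_vredsum_vs_u16m1_u16m1(acc, zero, vlmax);
      return __riscv_vmv_x_s_u16m1_u16(sum);
    }
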
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdau.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdau.c
new file mode 100644
index 0000000000000..769214736ab6f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdau.c
@@ -0,0 +1,524 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs1,
+                                       vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs1,
+                                       vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs1,
+                                     vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs1,
+                                     vuint8m1_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs1,
+                                     vuint8m2_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs1,
+                                     vuint8m4_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u16m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs1,
+                                       vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u32mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs1,
+                                     vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u32m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs1,
+                                     vuint16m1_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u32m2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs1,
+                                     vuint16m2_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u32m4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs1,
+                                     vuint16m4_t vs2, size_t vl) {
+  return __riscv_vwabdau_vv_u32m8_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint8mf8_t vs1, vuint8mf8_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabdau_vv_u16mf4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint8mf4_t vs1, vuint8mf4_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabdau_vv_u16mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                      vuint8mf2_t vs1, vuint8mf2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u16m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                      vuint8m1_t vs1, vuint8m1_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u16m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                      vuint8m2_t vs1, vuint8m2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u16m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                      vuint8m4_t vs1, vuint8m4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u16m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint16mf4_t vs1, vuint16mf4_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabdau_vv_u32mf2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                      vuint16mf2_t vs1, vuint16mf2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u32m1_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                      vuint16m1_t vs1, vuint16m1_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u32m2_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                      vuint16m2_t vs1, vuint16m2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u32m4_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                      vuint16m4_t vs1, vuint16m4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_vv_u32m8_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                         vuint8mf8_t vs1, vuint8mf8_t vs2,
+                                         size_t vl) {
+  return __riscv_vwabdau_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                         vuint8mf4_t vs1, vuint8mf4_t vs2,
+                                         size_t vl) {
+  return __riscv_vwabdau_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                       vuint8mf2_t vs1, vuint8mf2_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u16m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                       vuint8m1_t vs1, vuint8m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u16m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                       vuint8m2_t vs1, vuint8m2_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u16m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                       vuint8m4_t vs1, vuint8m4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u16m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                         vuint16mf4_t vs1, vuint16mf4_t vs2,
+                                         size_t vl) {
+  return __riscv_vwabdau_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                       vuint16mf2_t vs1, vuint16mf2_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u32m1_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                       vuint16m1_t vs1, vuint16m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u32m2_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                       vuint16m2_t vs1, vuint16m2_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u32m4_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                       vuint16m4_t vs1, vuint16m4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u32m8_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint8mf8_t vs1, vuint8mf8_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u16mf4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint8mf4_t vs1, vuint8mf4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u16mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                     vuint8mf2_t vs1, vuint8mf2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_vv_u16m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                     vuint8m1_t vs1, vuint8m1_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_vv_u16m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                     vuint8m2_t vs1, vuint8m2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_vv_u16m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                     vuint8m4_t vs1, vuint8m4_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_vv_u16m8_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint16mf4_t vs1, vuint16mf4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_vv_u32mf2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                     vuint16mf2_t vs1, vuint16mf2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_vv_u32m1_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                     vuint16m1_t vs1, vuint16m1_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_vv_u32m2_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                     vuint16m2_t vs1, vuint16m2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_vv_u32m4_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                     vuint16m4_t vs1, vuint16m4_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_vv_u32m8_mu(vm, vd, vs1, vs2, vl);
+}

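A note on the policy suffixes exercised above: the trailing i64 immediate in the masked @llvm.riscv.vwabdau.mask checks encodes the tail/mask policy, with bit 0 set for tail-agnostic and bit 1 set for mask-agnostic. That is why the _tum tests lower with a policy operand of 2 (tail undisturbed, mask agnostic), the _tumu tests with 0 (both undisturbed), and the _mu tests with 1 (tail agnostic, mask undisturbed). A minimal usage sketch follows; it is an illustration rather than part of the commit diff, and the wrapper name is invented:

    #include <riscv_vector.h>

    // Accumulate widened absolute differences of vs1 and vs2 into vd under
    // mask vm, leaving tail elements of vd undisturbed. This is the _tum
    // spelling, which lowers to the masked intrinsic with policy immediate 2.
    vuint16m1_t acc_absdiff(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1,
                            vuint8mf2_t vs2, size_t vl) {
      return __riscv_vwabdau_vv_u16m1_tum(vm, vd, vs1, vs2, vl);
    }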
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd.c
new file mode 100644
index 0000000000000..a7e91e2a4bd5a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd.c
@@ -0,0 +1,585 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2,
+                                  vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2,
+                                  vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2,
+                                  vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1,
+                                size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1,
+                                size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1,
+                                size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1,
+                                size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2,
+                                    vint16mf4_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2,
+                                    vint16mf2_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2,
+                                  vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2,
+                                  vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2,
+                                  vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8_tu(vuint16m8_t vd, vint16m8_t vs2,
+                                  vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vint8mf2_t vs2,
+                                   vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                                 vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                                 vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                                 vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                                 vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                     vint16mf4_t vs2, vint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                     vint16mf2_t vs2, vint16mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                   vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                    vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                    vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                    vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                                  vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                                  vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                                  vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                                  vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                      vint16mf4_t vs2, vint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                      vint16mf2_t vs2, vint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                    vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                    vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                    vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                    vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabd_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vint8mf8_t vs2,
+                                  vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabd_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vint8mf4_t vs2,
+                                  vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabd_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vint8mf2_t vs2,
+                                  vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabd_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                                vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabd_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                                vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabd_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                                vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabd_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                                vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabd_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                    vint16mf4_t vs2, vint16mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabd_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                    vint16mf2_t vs2, vint16mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabd_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vint16m1_t vs2,
+                                  vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabd_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                  vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabd_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                  vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabd_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                  vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_mu(vm, vd, vs2, vs1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu.c
new file mode 100644
index 0000000000000..c1929670e3a0b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu.c
@@ -0,0 +1,603 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2,
+                                   vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2,
+                                   vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2,
+                                   vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                     vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                     vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
+                                   vuint16m1_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2,
+                                   vuint16m2_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2,
+                                   vuint16m4_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2,
+                                   vuint16m8_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                    vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                  vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                  vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                  vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                  vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m1_t vs2, vuint16m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                    vuint16m2_t vs2, vuint16m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                    vuint16m4_t vs2, vuint16m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                    vuint16m8_t vs2, vuint16m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vabdu_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                   vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                   vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                   vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                   vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m1_t vs2, vuint16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m2_t vs2, vuint16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m4_t vs2, vuint16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                     vuint16m8_t vs2, vuint16m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                   vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                   size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                   vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                   size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+                                   vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                 vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                 vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                 vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                 vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                     vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                     vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                   vuint16m1_t vs2, vuint16m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2,
+                                   vuint16m2_t vs1, size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2,
+                                   vuint16m4_t vs1, size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2,
+                                   vuint16m8_t vs1, size_t vl) {
+  return __riscv_vabdu_mu(vm, vd, vs2, vs1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs.c
new file mode 100644
index 0000000000000..747fae75a5694
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs.c
@@ -0,0 +1,956 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8_tu(vuint16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2_tu(vuint32mf2_t vd, vint32mf2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1_tu(vuint32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2_tu(vuint32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4_tu(vuint32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8_tu(vuint32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1_tu(vuint64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2_tu(vuint64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4_tu(vuint64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8_tu(vuint64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vint8mf8_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vint8mf4_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vint8mf2_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                                size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                                size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                                size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                                size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                    vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                    vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vint16m1_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                    vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vint32m1_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vint32m2_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vint32m4_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vint32m8_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vint64m1_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vint64m2_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vint64m4_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vint64m8_t vs2,
+                                  size_t vl) {
+  return __riscv_vabs_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vint8mf8_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vint8mf4_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vint8mf2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                     vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                     vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vint16m1_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                     vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vint32m1_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vint32m2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vint32m4_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vint32m8_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vint64m1_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vint64m2_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vint64m4_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vint64m8_t vs2,
+                                   size_t vl) {
+  return __riscv_vabs_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabs_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vint8mf8_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabs_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vint8mf4_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabs_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vint8mf2_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabs_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vint8m1_t vs2,
+                               size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabs_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vint8m2_t vs2,
+                               size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabs_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vint8m4_t vs2,
+                               size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabs_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vint8m8_t vs2,
+                               size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabs_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                   vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabs_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                   vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabs_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vint16m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabs_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vint16m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabs_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vint16m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabs_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vint16m8_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vabs_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                   vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vabs_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vint32m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vabs_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vint32m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vabs_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vint32m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vabs_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vint32m8_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vabs_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vint64m1_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vabs_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vint64m2_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vabs_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vint64m4_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vabs_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vint64m8_t vs2,
+                                 size_t vl) {
+  return __riscv_vabs_mu(vm, vd, vs2, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabda.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabda.c
new file mode 100644
index 0000000000000..a71ee33c1a6e1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabda.c
@@ -0,0 +1,511 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4_tu(vuint16mf4_t vd, vint8mf8_t vs1,
+                                      vint8mf8_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2_tu(vuint16mf2_t vd, vint8mf4_t vs1,
+                                      vint8mf4_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1_tu(vuint16m1_t vd, vint8mf2_t vs1,
+                                    vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2_tu(vuint16m2_t vd, vint8m1_t vs1,
+                                    vint8m1_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4_tu(vuint16m4_t vd, vint8m2_t vs1,
+                                    vint8m2_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8_tu(vuint16m8_t vd, vint8m4_t vs1,
+                                    vint8m4_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2_tu(vuint32mf2_t vd, vint16mf4_t vs1,
+                                      vint16mf4_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1_tu(vuint32m1_t vd, vint16mf2_t vs1,
+                                    vint16mf2_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2_tu(vuint32m2_t vd, vint16m1_t vs1,
+                                    vint16m1_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4_tu(vuint32m4_t vd, vint16m2_t vs1,
+                                    vint16m2_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8_tu(vuint32m8_t vd, vint16m4_t vs1,
+                                    vint16m4_t vs2, size_t vl) {
+  return __riscv_vwabda_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vint8mf8_t vs1, vint8mf8_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vint8mf4_t vs1, vint8mf4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vint8mf2_t vs1, vint8mf2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vint8m1_t vs1,
+                                     vint8m1_t vs2, size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vint8m2_t vs1,
+                                     vint8m2_t vs2, size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vint8m4_t vs1,
+                                     vint8m4_t vs2, size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vint16mf4_t vs1, vint16mf4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vint16mf2_t vs1, vint16mf2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vint16m1_t vs1, vint16m1_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vint16m2_t vs1, vint16m2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                     vint16m4_t vs1, vint16m4_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabda_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vint8mf8_t vs1, vint8mf8_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vint8mf4_t vs1, vint8mf4_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vint8mf2_t vs1, vint8mf2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                      vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vint16mf4_t vs1, vint16mf4_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vint16mf2_t vs1, vint16mf2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vint16m1_t vs1, vint16m1_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vint16m2_t vs1, vint16m2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                      vint16m4_t vs1, vint16m4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabda_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vint8mf8_t vs1, vint8mf8_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabda_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vint8mf4_t vs1, vint8mf4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabda_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabda_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vint8m1_t vs1,
+                                    vint8m1_t vs2, size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabda_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vint8m2_t vs1,
+                                    vint8m2_t vs2, size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabda_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vint8m4_t vs1,
+                                    vint8m4_t vs2, size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabda_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vint16mf4_t vs1, vint16mf4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabda_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vint16mf2_t vs1, vint16mf2_t vs2,
+                                    size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabda_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabda_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vint16m2_t vs1,
+                                    vint16m2_t vs2, size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabda_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vint16m4_t vs1,
+                                    vint16m4_t vs2, size_t vl) {
+  return __riscv_vwabda_mu(vm, vd, vs1, vs2, vl);
+}

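The vwabda checks above also pin down the policy encoding: the trailing i64 immediate on each @llvm.riscv.vwabda* call is the RVV policy operand, with bit 0 meaning tail agnostic and bit 1 meaning mask agnostic. That is why _tu and _tum lower with policy 2 (tail undisturbed, mask agnostic), _tumu with 0 (both undisturbed), and _mu with 1 (tail agnostic, mask undisturbed). As a minimal sketch mirroring these tests (sad_step is a hypothetical helper, not part of this patch, and assumes a compiler built with -target-feature +experimental-zvabd):

  #include <riscv_vector.h>

  // Accumulate the widened absolute difference |a - b| of two signed
  // i8 sources into an unsigned u16 accumulator; _tu leaves the tail
  // elements of acc undisturbed, so this lowers with policy operand 2.
  vuint16m1_t sad_step(vuint16m1_t acc, vint8mf2_t a, vint8mf2_t b,
                       size_t vl) {
    return __riscv_vwabda_tu(acc, a, b, vl);
  }

The vwabdau file below exercises the same tu/tum/tumu/mu matrix for the unsigned-source variant, which takes vuint*_t sources instead of vint*_t.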
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdau.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdau.c
new file mode 100644
index 0000000000000..651f299b8412a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdau.c
@@ -0,0 +1,524 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs1,
+                                       vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs1,
+                                       vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs1,
+                                     vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs1,
+                                     vuint8m1_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs1,
+                                     vuint8m2_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs1,
+                                     vuint8m4_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs1,
+                                       vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs1,
+                                     vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs1,
+                                     vuint16m1_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs1,
+                                     vuint16m2_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs1,
+                                     vuint16m4_t vs2, size_t vl) {
+  return __riscv_vwabdau_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint8mf8_t vs1, vuint8mf8_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint8mf4_t vs1, vuint8mf4_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                      vuint8mf2_t vs1, vuint8mf2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                      vuint8m1_t vs1, vuint8m1_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                      vuint8m2_t vs1, vuint8m2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                      vuint8m4_t vs1, vuint8m4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint16mf4_t vs1, vuint16mf4_t vs2,
+                                        size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                      vuint16mf2_t vs1, vuint16mf2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                      vuint16m1_t vs1, vuint16m1_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                      vuint16m2_t vs1, vuint16m2_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                      vuint16m4_t vs1, vuint16m4_t vs2,
+                                      size_t vl) {
+  return __riscv_vwabdau_tum(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                         vuint8mf8_t vs1, vuint8mf8_t vs2,
+                                         size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                         vuint8mf4_t vs1, vuint8mf4_t vs2,
+                                         size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                       vuint8mf2_t vs1, vuint8mf2_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                       vuint8m1_t vs1, vuint8m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                       vuint8m2_t vs1, vuint8m2_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                       vuint8m4_t vs1, vuint8m4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                         vuint16mf4_t vs1, vuint16mf4_t vs2,
+                                         size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                       vuint16mf2_t vs1, vuint16mf2_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                       vuint16m1_t vs1, vuint16m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                       vuint16m2_t vs1, vuint16m2_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                       vuint16m4_t vs1, vuint16m4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_tumu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS1:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint8mf8_t vs1, vuint8mf8_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS1:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint8mf4_t vs1, vuint8mf4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS1:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                     vuint8mf2_t vs1, vuint8mf2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS1:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                     vuint8m1_t vs1, vuint8m1_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS1:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                     vuint8m2_t vs1, vuint8m2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS1:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                     vuint8m4_t vs1, vuint8m4_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS1:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint16mf4_t vs1, vuint16mf4_t vs2,
+                                       size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS1:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                     vuint16mf2_t vs1, vuint16mf2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS1:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                     vuint16m1_t vs1, vuint16m1_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS1:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                     vuint16m2_t vs1, vuint16m2_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS1:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                     vuint16m4_t vs1, vuint16m4_t vs2,
+                                     size_t vl) {
+  return __riscv_vwabdau_mu(vm, vd, vs1, vs2, vl);
+}

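A note on the autogenerated policy tests above: the _mu (mask
undisturbed) variants lower to the masked intrinsic with policy
immediate 1 (tail agnostic, mask undisturbed), so active elements
receive the widening absolute-difference accumulation while inactive
elements keep the corresponding value of vd. A minimal C sketch of the
same call the tests exercise; the header and overloaded spelling are
the usual RVV ones, but the extension is still experimental, so the
names may change with the proposal:

#include <riscv_vector.h>
#include <stddef.h>

// acc[i] += |a[i] - b[i]| (u8 widened to u16) where mask[i] is set;
// acc[i] is left untouched where mask[i] is clear (mask undisturbed).
vuint16m1_t sad_step_mu(vbool16_t mask, vuint16m1_t acc,
                        vuint8mf2_t a, vuint8mf2_t b, size_t vl) {
  return __riscv_vwabdau_mu(mask, acc, a, b, vl);
}
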
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index f194ce99b52d1..92ca3a827d412 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -356,15 +356,34 @@ let TargetPrefix = "riscv" in {
                     [IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
   }
-  // For destination vector type is the same as first and second source vector.
+  // For mask logical operations without passthru operand.
   // Input: (vector_in, vector_in, vl)
-  class RISCVBinaryAAAUnMasked
+  class RISCVMaskLogical
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 2;
   }
   // For operations where the destination vector type matches the first and second source vector types.
+  // Input: (passthru, vector_in, vector_in, vl)
+  class RISCVBinaryAAAUnMasked
+        : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>,
+                     LLVMMatchType<0>, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 3;
+  }
+  // For operations where the destination vector type matches the first and second source vector types (with mask).
+  // Input: (passthru, vector_in, vector_in, mask, vl, policy)
+  class RISCVBinaryAAAMasked
+       : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                   [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+                    LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+                    LLVMMatchType<1>],
+                   [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
+    let VLOperand = 4;
+  }
+  // For operations where the destination vector type matches the first and second source vector types.
   // Input: (passthru, vector_in, int_vector_in, vl)
   class RISCVRGatherVVUnMasked
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
@@ -1203,6 +1222,10 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
   }
+  multiclass RISCVBinaryAAA {
+    def "int_riscv_" # NAME : RISCVBinaryAAAUnMasked;
+    def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAAMasked;
+  }
   // Like RISCVBinaryAAX, but the second operand is used as a shift amount so it
   // must be a vector or an XLen scalar.
   multiclass RISCVBinaryAAShift {
@@ -1610,14 +1633,14 @@ let TargetPrefix = "riscv" in {
     defm vfwredosum : RISCVReductionRoundingMode;
   }
 
-  def int_riscv_vmand: RISCVBinaryAAAUnMasked;
-  def int_riscv_vmnand: RISCVBinaryAAAUnMasked;
-  def int_riscv_vmandn: RISCVBinaryAAAUnMasked;
-  def int_riscv_vmxor: RISCVBinaryAAAUnMasked;
-  def int_riscv_vmor: RISCVBinaryAAAUnMasked;
-  def int_riscv_vmnor: RISCVBinaryAAAUnMasked;
-  def int_riscv_vmorn: RISCVBinaryAAAUnMasked;
-  def int_riscv_vmxnor: RISCVBinaryAAAUnMasked;
+  def int_riscv_vmand: RISCVMaskLogical;
+  def int_riscv_vmnand: RISCVMaskLogical;
+  def int_riscv_vmandn: RISCVMaskLogical;
+  def int_riscv_vmxor: RISCVMaskLogical;
+  def int_riscv_vmor: RISCVMaskLogical;
+  def int_riscv_vmnor: RISCVMaskLogical;
+  def int_riscv_vmorn: RISCVMaskLogical;
+  def int_riscv_vmxnor: RISCVMaskLogical;
   def int_riscv_vmclr : RISCVNullaryIntrinsic;
   def int_riscv_vmset : RISCVNullaryIntrinsic;
 
@@ -1907,6 +1930,17 @@ let TargetPrefix = "riscv" in {
   def int_riscv_vsm3me   : RISCVBinaryAAXUnMasked;
 } // TargetPrefix = "riscv"
 
+//===----------------------------------------------------------------------===//
+// Zvabd - Vector Absolute Difference
+//===----------------------------------------------------------------------===//
+let TargetPrefix = "riscv" in {
+  defm vabs    : RISCVUnaryAA;
+  defm vabd    : RISCVBinaryAAA;
+  defm vabdu   : RISCVBinaryAAA;
+  defm vwabda  : RISCVTernaryWide;
+  defm vwabdau : RISCVTernaryWide;
+} // TargetPrefix = "riscv"
+
 //===----------------------------------------------------------------------===//
 // Zvqdotq - Vector quad widening 4D Dot Product
 //

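The IntrinsicsRISCV.td change above separates two shapes that used to
share one class: the mask logical intrinsics keep the old
(vector_in, vector_in, vl) layout under the new RISCVMaskLogical name,
while the rebuilt RISCVBinaryAAA classes gain a passthru operand (plus
mask, vl, and an immediate policy operand in the masked form). That
passthru is what makes tail- and mask-policy variants of vabd/vabdu
expressible. A hedged C sketch of the tail-undisturbed form this
enables; the _tu spelling is assumed to follow standard RVV policy
naming:

#include <riscv_vector.h>
#include <stddef.h>

// Tail-undisturbed signed absolute difference: lanes [0, vl) become
// |a[i] - b[i]|; lanes at and beyond vl keep their old value from vd.
vint8m1_t abd_tu(vint8m1_t vd, vint8m1_t a, vint8m1_t b, size_t vl) {
  return __riscv_vabd_tu(vd, a, b, vl);
}
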
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 09c70ba72da29..aa25a7af443e4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -86,6 +86,12 @@ defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU", ABDIntVectors>;
 defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD", ABDIntVectors>;
 defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU", ABDIntVectors>;
 
+defm : VPatUnaryV_V<"int_riscv_vabs", "PseudoVABS", AllIntegerVectors>;
+defm : VPatBinaryV_VV<"int_riscv_vabd", "PseudoVABD", ABDIntVectors>;
+defm : VPatBinaryV_VV<"int_riscv_vabdu", "PseudoVABDU", ABDIntVectors>;
+defm : VPatTernaryW_VV<"int_riscv_vwabda", "PseudoVWABDA", ABDAIntVectors>;
+defm : VPatTernaryW_VV<"int_riscv_vwabdau", "PseudoVWABDAU", ABDAIntVectors>;
+
 foreach vti = AllIntegerVectors in {
   def : Pat<(vti.Vector (abs (vti.Vector vti.RegClass:$rs2))),
             (!cast<Instruction>("PseudoVABS_V_"#vti.LMul.MX)

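The VPat lines above wire the new intrinsics to the existing
PseudoVABS/PseudoVABD/PseudoVABDU/PseudoVWABDA/PseudoVWABDAU
pseudoinstructions; the pre-existing foreach pattern kept below them is
what already lets a generic IR-level abs select vabs.v, so
autovectorized scalar code can benefit without intrinsics. A rough C
illustration of that path; whether the vectorizer actually fires here
depends on the cost model, so treat it as a sketch rather than a
guarantee:

#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>

// Plain scalar loop; once vectorized, the abs() maps to the generic
// abs node that the pattern above lowers to vabs.v under Zvabd.
void abs_buf(int32_t *dst, const int32_t *src, size_t n) {
  for (size_t i = 0; i < n; ++i)
    dst[i] = abs(src[i]);
}
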
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabd.ll b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
new file mode 100644
index 0000000000000..f3750e7f4513d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
@@ -0,0 +1,335 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @vabd_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i8> @llvm.riscv.vabd(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl)
+  ret <vscale x 1 x i8> %res
+}
+
+define <vscale x 1 x i8> @vabd_vv_mask_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i8> @llvm.riscv.vabd.mask(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabd_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i8> @llvm.riscv.vabd(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl)
+  ret <vscale x 2 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabd_vv_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i8> @llvm.riscv.vabd.mask(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 2 x i8> %a,
+    <vscale x 2 x i8> %b,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i8> %res
+}
+
+define <vscale x 4 x i8> @vabd_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i8> @llvm.riscv.vabd(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl)
+  ret <vscale x 4 x i8> %res
+}
+
+define <vscale x 4 x i8> @vabd_vv_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i8> @llvm.riscv.vabd.mask(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 4 x i8> %a,
+    <vscale x 4 x i8> %b,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i8> %res
+}
+
+define <vscale x 8 x i8> @vabd_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i8> @llvm.riscv.vabd(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl)
+  ret <vscale x 8 x i8> %res
+}
+
+define <vscale x 8 x i8> @vabd_vv_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i8> @llvm.riscv.vabd.mask(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 8 x i8> %a,
+    <vscale x 8 x i8> %b,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i8> %res
+}
+
+define <vscale x 16 x i8> @vabd_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.riscv.vabd(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @vabd_vv_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.riscv.vabd.mask(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 16 x i8> %a,
+    <vscale x 16 x i8> %b,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 32 x i8> @vabd_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i8> @llvm.riscv.vabd(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl)
+  ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 32 x i8> @vabd_vv_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i8> @llvm.riscv.vabd.mask(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 32 x i8> %a,
+    <vscale x 32 x i8> %b,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabd_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %res = call <vscale x 64 x i8> @llvm.riscv.vabd(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen %vl)
+  ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabd_vv_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 64 x i8> @llvm.riscv.vabd.mask(
+    <vscale x 64 x i8> %passthru,
+    <vscale x 64 x i8> %a,
+    <vscale x 64 x i8> %b,
+    <vscale x 64 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 1 x i16> @vabd_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vabd(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl)
+  ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 1 x i16> @vabd_vv_mask_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vabd.mask(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 1 x i16> %a,
+    <vscale x 1 x i16> %b,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 2 x i16> @vabd_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vabd(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl)
+  ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 2 x i16> @vabd_vv_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vabd.mask(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 2 x i16> %a,
+    <vscale x 2 x i16> %b,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabd_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vabd(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl)
+  ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabd_vv_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vabd.mask(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 4 x i16> %a,
+    <vscale x 4 x i16> %b,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 8 x i16> @vabd_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vabd(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @vabd_vv_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vabd.mask(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 8 x i16> %a,
+    <vscale x 8 x i16> %b,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i16> @vabd_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vabd(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl)
+  ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 16 x i16> @vabd_vv_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vabd.mask(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 16 x i16> %a,
+    <vscale x 16 x i16> %b,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 32 x i16> @vabd_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vabd.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vabd(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen %vl)
+  ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x i16> @vabd_vv_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vabd.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vabd.mask(
+    <vscale x 32 x i16> %passthru,
+    <vscale x 32 x i16> %a,
+    <vscale x 32 x i16> %b,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i16> %res
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}

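vabd (the file above) and vabdu (the file below) differ only in how
the lane bits are interpreted, which matters whenever the sign bit is
set: with 8-bit lanes holding 0x01 and 0xFF, the signed difference is
|1 - (-1)| = 2, while the unsigned one is |1 - 255| = 254. A short
sketch with the assumed overloaded spellings:

#include <riscv_vector.h>
#include <stddef.h>

// Same bit patterns, different interpretation of the lanes.
vint8m1_t abd_s8(vint8m1_t a, vint8m1_t b, size_t vl) {
  return __riscv_vabd(a, b, vl);   // lanes read as signed
}

vuint8m1_t abd_u8(vuint8m1_t a, vuint8m1_t b, size_t vl) {
  return __riscv_vabdu(a, b, vl);  // lanes read as unsigned
}
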
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabdu.ll b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
new file mode 100644
index 0000000000000..e629e41b61172
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
@@ -0,0 +1,335 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @vabdu_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i8> @llvm.riscv.vabdu(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl)
+  ret <vscale x 1 x i8> %res
+}
+
+define <vscale x 1 x i8> @vabdu_vv_mask_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i8> @llvm.riscv.vabdu.mask(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabdu_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i8> @llvm.riscv.vabdu(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl)
+  ret <vscale x 2 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabdu_vv_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i8> @llvm.riscv.vabdu.mask(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 2 x i8> %a,
+    <vscale x 2 x i8> %b,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i8> %res
+}
+
+define <vscale x 4 x i8> @vabdu_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i8> @llvm.riscv.vabdu(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl)
+  ret <vscale x 4 x i8> %res
+}
+
+define <vscale x 4 x i8> @vabdu_vv_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i8> @llvm.riscv.vabdu.mask(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 4 x i8> %a,
+    <vscale x 4 x i8> %b,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i8> %res
+}
+
+define <vscale x 8 x i8> @vabdu_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i8> @llvm.riscv.vabdu(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl)
+  ret <vscale x 8 x i8> %res
+}
+
+define <vscale x 8 x i8> @vabdu_vv_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i8> @llvm.riscv.vabdu.mask(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 8 x i8> %a,
+    <vscale x 8 x i8> %b,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i8> %res
+}
+
+define <vscale x 16 x i8> @vabdu_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.riscv.vabdu(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @vabdu_vv_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.riscv.vabdu.mask(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 16 x i8> %a,
+    <vscale x 16 x i8> %b,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 32 x i8> @vabdu_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i8> @llvm.riscv.vabdu(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl)
+  ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 32 x i8> @vabdu_vv_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i8> @llvm.riscv.vabdu.mask(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 32 x i8> %a,
+    <vscale x 32 x i8> %b,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabdu_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %res = call <vscale x 64 x i8> @llvm.riscv.vabdu(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen %vl)
+  ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabdu_vv_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask(
+    <vscale x 64 x i8> %passthru,
+    <vscale x 64 x i8> %a,
+    <vscale x 64 x i8> %b,
+    <vscale x 64 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 1 x i16> @vabdu_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vabdu(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl)
+  ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 1 x i16> @vabdu_vv_mask_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vabdu.mask(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 1 x i16> %a,
+    <vscale x 1 x i16> %b,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 2 x i16> @vabdu_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vabdu(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl)
+  ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 2 x i16> @vabdu_vv_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vabdu.mask(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 2 x i16> %a,
+    <vscale x 2 x i16> %b,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabdu_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vabdu(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl)
+  ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabdu_vv_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vabdu.mask(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 4 x i16> %a,
+    <vscale x 4 x i16> %b,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 8 x i16> @vabdu_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vabdu(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @vabdu_vv_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vabdu.mask(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 8 x i16> %a,
+    <vscale x 8 x i16> %b,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i16> @vabdu_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vabdu(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl)
+  ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 16 x i16> @vabdu_vv_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vabdu.mask(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 16 x i16> %a,
+    <vscale x 16 x i16> %b,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 32 x i16> @vabdu_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vabdu.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vabdu(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen %vl)
+  ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x i16> @vabdu_vv_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vabdu.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask(
+    <vscale x 32 x i16> %passthru,
+    <vscale x 32 x i16> %a,
+    <vscale x 32 x i16> %b,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i16> %res
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}

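For context on the widening accumulate forms exercised in the
vwabda/vwabdau tests further below: the classic consumer is a
sum-of-absolute-differences kernel, where vwabdau folds the subtract,
absolute value, widen, and add into one instruction. A sketch under
the assumed intrinsic spellings (the _tu suffix in particular), and
assuming n is small enough that the 16-bit accumulator lanes cannot
overflow:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

uint32_t sad_u8(const uint8_t *a, const uint8_t *b, size_t n) {
  size_t vlmax = __riscv_vsetvlmax_e16m2();
  vuint16m2_t acc = __riscv_vmv_v_x_u16m2(0, vlmax);
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);
    vuint8m1_t va = __riscv_vle8_v_u8m1(a + i, vl);
    vuint8m1_t vb = __riscv_vle8_v_u8m1(b + i, vl);
    // Accumulate |va - vb| into 16-bit lanes; _tu keeps tail lanes.
    acc = __riscv_vwabdau_tu(acc, va, vb, vl);
    i += vl;
  }
  // Widening reduction of the u16 partial sums into a u32 scalar.
  vuint32m1_t zero = __riscv_vmv_v_x_u32m1(0, 1);
  vuint32m1_t sum = __riscv_vwredsumu(acc, zero, vlmax);
  return __riscv_vmv_x_s_u32m1_u32(sum);
}
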
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabs.ll b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
new file mode 100644
index 0000000000000..db1f6088971a6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
@@ -0,0 +1,536 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @vabs_v_i8mf8(<vscale x 1 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i8> @llvm.riscv.vabs(<vscale x 1 x i8> poison, <vscale x 1 x i8> %v, iXLen %vl)
+  ret <vscale x 1 x i8> %res
+}
+
+define <vscale x 1 x i8> @vabs_v_mask_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %v, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vabs.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i8> @llvm.riscv.vabs.mask(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 1 x i8> %v,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabs_v_i8mf4(<vscale x 2 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i8> @llvm.riscv.vabs(<vscale x 2 x i8> poison, <vscale x 2 x i8> %v, iXLen %vl)
+  ret <vscale x 2 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabs_v_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %v, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vabs.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i8> @llvm.riscv.vabs.mask(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 2 x i8> %v,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i8> %res
+}
+
+define <vscale x 4 x i8> @vabs_v_i8mf2(<vscale x 4 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i8> @llvm.riscv.vabs(<vscale x 4 x i8> poison, <vscale x 4 x i8> %v, iXLen %vl)
+  ret <vscale x 4 x i8> %res
+}
+
+define <vscale x 4 x i8> @vabs_v_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %v, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vabs.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i8> @llvm.riscv.vabs.mask(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 4 x i8> %v,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i8> %res
+}
+
+define <vscale x 8 x i8> @vabs_v_i8m1(<vscale x 8 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i8> @llvm.riscv.vabs(<vscale x 8 x i8> poison, <vscale x 8 x i8> %v, iXLen %vl)
+  ret <vscale x 8 x i8> %res
+}
+
+define <vscale x 8 x i8> @vabs_v_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %v, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vabs.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i8> @llvm.riscv.vabs.mask(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 8 x i8> %v,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i8> %res
+}
+
+define <vscale x 16 x i8> @vabs_v_i8m2(<vscale x 16 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.riscv.vabs(<vscale x 16 x i8> poison, <vscale x 16 x i8> %v, iXLen %vl)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @vabs_v_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %v, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vabs.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.riscv.vabs.mask(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 16 x i8> %v,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 32 x i8> @vabs_v_i8m4(<vscale x 32 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i8> @llvm.riscv.vabs(<vscale x 32 x i8> poison, <vscale x 32 x i8> %v, iXLen %vl)
+  ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 32 x i8> @vabs_v_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %v, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vabs.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i8> @llvm.riscv.vabs.mask(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 32 x i8> %v,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabs_v_i8m8(<vscale x 64 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 64 x i8> @llvm.riscv.vabs(<vscale x 64 x i8> poison, <vscale x 64 x i8> %v, iXLen %vl)
+  ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabs_v_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %v, <vscale x 64 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT:    vabs.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 64 x i8> @llvm.riscv.vabs.mask(
+    <vscale x 64 x i8> %passthru,
+    <vscale x 64 x i8> %v,
+    <vscale x 64 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 1 x i16> @vabs_v_i16mf4(<vscale x 1 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vabs(<vscale x 1 x i16> poison, <vscale x 1 x i16> %v, iXLen %vl)
+  ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 1 x i16> @vabs_v_mask_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %v, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vabs.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vabs.mask(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 1 x i16> %v,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 2 x i16> @vabs_v_i16mf2(<vscale x 2 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vabs(<vscale x 2 x i16> poison, <vscale x 2 x i16> %v, iXLen %vl)
+  ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 2 x i16> @vabs_v_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %v, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vabs.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vabs.mask(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 2 x i16> %v,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabs_v_i16m1(<vscale x 4 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vabs(<vscale x 4 x i16> poison, <vscale x 4 x i16> %v, iXLen %vl)
+  ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabs_v_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %v, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vabs.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vabs.mask(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 4 x i16> %v,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 8 x i16> @vabs_v_i16m2(<vscale x 8 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vabs(<vscale x 8 x i16> poison, <vscale x 8 x i16> %v, iXLen %vl)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @vabs_v_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %v, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vabs.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vabs.mask(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 8 x i16> %v,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i16> @vabs_v_i16m4(<vscale x 16 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vabs(<vscale x 16 x i16> poison, <vscale x 16 x i16> %v, iXLen %vl)
+  ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 16 x i16> @vabs_v_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %v, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vabs.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vabs.mask(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 16 x i16> %v,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 32 x i16> @vabs_v_i16m8(<vscale x 32 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vabs(<vscale x 32 x i16> poison, <vscale x 32 x i16> %v, iXLen %vl)
+  ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x i16> @vabs_v_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %v, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT:    vabs.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vabs.mask(
+    <vscale x 32 x i16> %passthru,
+    <vscale x 32 x i16> %v,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 1 x i32> @vabs_v_i32mf2(<vscale x 1 x i32> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i32> @llvm.riscv.vabs(<vscale x 1 x i32> poison, <vscale x 1 x i32> %v, iXLen %vl)
+  ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 1 x i32> @vabs_v_mask_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 1 x i32> %v, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vabs.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i32> @llvm.riscv.vabs.mask(
+    <vscale x 1 x i32> %passthru,
+    <vscale x 1 x i32> %v,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 2 x i32> @vabs_v_i32m1(<vscale x 2 x i32> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i32> @llvm.riscv.vabs(<vscale x 2 x i32> poison, <vscale x 2 x i32> %v, iXLen %vl)
+  ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 2 x i32> @vabs_v_mask_i32m1(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vabs.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i32> @llvm.riscv.vabs.mask(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 2 x i32> %v,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 4 x i32> @vabs_v_i32m2(<vscale x 4 x i32> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.riscv.vabs(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, iXLen %vl)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @vabs_v_mask_i32m2(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vabs.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.riscv.vabs.mask(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 4 x i32> %v,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 8 x i32> @vabs_v_i32m4(<vscale x 8 x i32> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i32> @llvm.riscv.vabs(<vscale x 8 x i32> poison, <vscale x 8 x i32> %v, iXLen %vl)
+  ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 8 x i32> @vabs_v_mask_i32m4(<vscale x 8 x i32> %passthru, <vscale x 8 x i32> %v, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vabs.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i32> @llvm.riscv.vabs.mask(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 8 x i32> %v,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 16 x i32> @vabs_v_i32m8(<vscale x 16 x i32> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i32> @llvm.riscv.vabs(<vscale x 16 x i32> poison, <vscale x 16 x i32> %v, iXLen %vl)
+  ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 16 x i32> @vabs_v_mask_i32m8(<vscale x 16 x i32> %passthru, <vscale x 16 x i32> %v, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT:    vabs.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i32> @llvm.riscv.vabs.mask(
+    <vscale x 16 x i32> %passthru,
+    <vscale x 16 x i32> %v,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 1 x i64> @vabs_v_i64m1(<vscale x 1 x i64> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i64> @llvm.riscv.vabs(<vscale x 1 x i64> poison, <vscale x 1 x i64> %v, iXLen %vl)
+  ret <vscale x 1 x i64> %res
+}
+
+define <vscale x 1 x i64> @vabs_v_mask_i64m1(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %v, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vabs.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i64> @llvm.riscv.vabs.mask(
+    <vscale x 1 x i64> %passthru,
+    <vscale x 1 x i64> %v,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i64> %res
+}
+
+define <vscale x 2 x i64> @vabs_v_i64m2(<vscale x 2 x i64> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i64> @llvm.riscv.vabs(<vscale x 2 x i64> poison, <vscale x 2 x i64> %v, iXLen %vl)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @vabs_v_mask_i64m2(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %v, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vabs.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i64> @llvm.riscv.vabs.mask(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 2 x i64> %v,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 4 x i64> @vabs_v_i64m4(<vscale x 4 x i64> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i64> @llvm.riscv.vabs(<vscale x 4 x i64> poison, <vscale x 4 x i64> %v, iXLen %vl)
+  ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x i64> @vabs_v_mask_i64m4(<vscale x 4 x i64> %passthru, <vscale x 4 x i64> %v, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vabs.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i64> @llvm.riscv.vabs.mask(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 4 x i64> %v,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 8 x i64> @vabs_v_i64m8(<vscale x 8 x i64> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    vabs.v v8, v8
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i64> @llvm.riscv.vabs(<vscale x 8 x i64> poison, <vscale x 8 x i64> %v, iXLen %vl)
+  ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 8 x i64> @vabs_v_mask_i64m8(<vscale x 8 x i64> %passthru, <vscale x 8 x i64> %v, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT:    vabs.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i64> @llvm.riscv.vabs.mask(
+    <vscale x 8 x i64> %passthru,
+    <vscale x 8 x i64> %v,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i64> %res
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
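
The masked forms above all share one operand layout. For reference, here is a minimal annotated sketch of a masked call in the same shape as the tests in this file (the policy comments assume LLVM's usual RISC-V vector policy encoding, where bit 0 set means tail-agnostic and bit 1 set means mask-agnostic; that reading is an assumption, not something this patch states):

  %res = call <vscale x 4 x i32> @llvm.riscv.vabs.mask(
    <vscale x 4 x i32> %passthru, ; merged into inactive (masked-off) lanes
    <vscale x 4 x i32> %v,        ; source vector
    <vscale x 4 x i1> %mask,      ; v0 mask, shown as v0.t in the asm
    iXLen %vl,                    ; active vector length
    iXLen 1)                      ; policy: 1 = tail agnostic, mask undisturbed

With policy 1 the emitted vsetvli selects "ta, mu", matching the CHECK lines in the masked tests above.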

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwabda.ll b/llvm/test/CodeGen/RISCV/rvv/vwabda.ll
new file mode 100644
index 0000000000000..99816416a7a14
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabda.ll
@@ -0,0 +1,283 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i16> @vwabda_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vwabda(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 1 x i16> @vwabda_vv_mask_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vwabda.mask(
+    <vscale x 1 x i16> %vd,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 2 x i16> @vwabda_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vwabda(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 2 x i16> @vwabda_vv_mask_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vwabda.mask(
+    <vscale x 2 x i16> %vd,
+    <vscale x 2 x i8> %a,
+    <vscale x 2 x i8> %b,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vwabda_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vwabda(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 4 x i16> @vwabda_vv_mask_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vwabda.mask(
+    <vscale x 4 x i16> %vd,
+    <vscale x 4 x i8> %a,
+    <vscale x 4 x i8> %b,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 8 x i16> @vwabda_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vwabda(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @vwabda_vv_mask_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vwabda.mask(
+    <vscale x 8 x i16> %vd,
+    <vscale x 8 x i8> %a,
+    <vscale x 8 x i8> %b,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i16> @vwabda_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vwabda(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 16 x i16> @vwabda_vv_mask_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vwabda.mask(
+    <vscale x 16 x i16> %vd,
+    <vscale x 16 x i8> %a,
+    <vscale x 16 x i8> %b,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 32 x i16> @vwabda_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vwabda(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x i16> @vwabda_vv_mask_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vwabda.mask(
+    <vscale x 32 x i16> %vd,
+    <vscale x 32 x i8> %a,
+    <vscale x 32 x i8> %b,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 1 x i32> @vwabda_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i32> @llvm.riscv.vwabda(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 1 x i32> @vwabda_vv_mask_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i32> @llvm.riscv.vwabda.mask(
+    <vscale x 1 x i32> %vd,
+    <vscale x 1 x i16> %a,
+    <vscale x 1 x i16> %b,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 2 x i32> @vwabda_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i32> @llvm.riscv.vwabda(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 2 x i32> @vwabda_vv_mask_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i32> @llvm.riscv.vwabda.mask(
+    <vscale x 2 x i32> %vd,
+    <vscale x 2 x i16> %a,
+    <vscale x 2 x i16> %b,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 4 x i32> @vwabda_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.riscv.vwabda(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @vwabda_vv_mask_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.riscv.vwabda.mask(
+    <vscale x 4 x i32> %vd,
+    <vscale x 4 x i16> %a,
+    <vscale x 4 x i16> %b,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 8 x i32> @vwabda_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i32> @llvm.riscv.vwabda(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 8 x i32> @vwabda_vv_mask_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i32> @llvm.riscv.vwabda.mask(
+    <vscale x 8 x i32> %vd,
+    <vscale x 8 x i16> %a,
+    <vscale x 8 x i16> %b,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 16 x i32> @vwabda_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vwabda.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i32> @llvm.riscv.vwabda(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 16 x i32> @vwabda_vv_mask_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vwabda.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i32> @llvm.riscv.vwabda.mask(
+    <vscale x 16 x i32> %vd,
+    <vscale x 16 x i16> %a,
+    <vscale x 16 x i16> %b,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 16 x i32> %res
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
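
Unlike vabs above, vwabda is a widening accumulate: %vd serves as both the passthru and the accumulator, which is why every test in this file, masked or not, uses a tail-undisturbed ("tu") vsetvli and a policy operand of 0. A sketch of the intended semantics, assuming the draft Zvabd definition of widening absolute-difference-accumulate (the pseudo-code comment is illustrative, not taken from this patch):

  ; for each active element i, at twice the source SEW:
  ;   vd[i] = vd[i] + zext(|a[i] - b[i]|), with a and b compared as signed
  %res = call <vscale x 4 x i32> @llvm.riscv.vwabda(
    <vscale x 4 x i32> %vd, ; wide (2*SEW) accumulator, also the passthru
    <vscale x 4 x i16> %a,  ; narrow (SEW) sources
    <vscale x 4 x i16> %b,
    iXLen %vl, iXLen 0)     ; policy 0 = tail undisturbed, mask undisturbed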

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwabdau.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdau.ll
new file mode 100644
index 0000000000000..9f8777235dc31
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabdau.ll
@@ -0,0 +1,283 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i16> @vwabdau_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vwabdau(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 1 x i16> @vwabdau_vv_mask_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vwabdau.mask(
+    <vscale x 1 x i16> %vd,
+    <vscale x 1 x i8> %a,
+    <vscale x 1 x i8> %b,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 2 x i16> @vwabdau_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vwabdau(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 2 x i16> @vwabdau_vv_mask_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vwabdau.mask(
+    <vscale x 2 x i16> %vd,
+    <vscale x 2 x i8> %a,
+    <vscale x 2 x i8> %b,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vwabdau_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vwabdau(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 4 x i16> @vwabdau_vv_mask_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vwabdau.mask(
+    <vscale x 4 x i16> %vd,
+    <vscale x 4 x i8> %a,
+    <vscale x 4 x i8> %b,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 8 x i16> @vwabdau_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vwabdau(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @vwabdau_vv_mask_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vwabdau.mask(
+    <vscale x 8 x i16> %vd,
+    <vscale x 8 x i8> %a,
+    <vscale x 8 x i8> %b,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i16> @vwabdau_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vwabdau(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 16 x i16> @vwabdau_vv_mask_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vwabdau.mask(
+    <vscale x 16 x i16> %vd,
+    <vscale x 16 x i8> %a,
+    <vscale x 16 x i8> %b,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 32 x i16> @vwabdau_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vwabdau(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x i16> @vwabdau_vv_mask_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vwabdau.mask(
+    <vscale x 32 x i16> %vd,
+    <vscale x 32 x i8> %a,
+    <vscale x 32 x i8> %b,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 1 x i32> @vwabdau_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i32> @llvm.riscv.vwabdau(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 1 x i32> @vwabdau_vv_mask_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 1 x i32> @llvm.riscv.vwabdau.mask(
+    <vscale x 1 x i32> %vd,
+    <vscale x 1 x i16> %a,
+    <vscale x 1 x i16> %b,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 2 x i32> @vwabdau_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i32> @llvm.riscv.vwabdau(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 2 x i32> @vwabdau_vv_mask_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i32> @llvm.riscv.vwabdau.mask(
+    <vscale x 2 x i32> %vd,
+    <vscale x 2 x i16> %a,
+    <vscale x 2 x i16> %b,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 4 x i32> @vwabdau_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.riscv.vwabdau(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @vwabdau_vv_mask_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.riscv.vwabdau.mask(
+    <vscale x 4 x i32> %vd,
+    <vscale x 4 x i16> %a,
+    <vscale x 4 x i16> %b,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 8 x i32> @vwabdau_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i32> @llvm.riscv.vwabdau(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 8 x i32> @vwabdau_vv_mask_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i32> @llvm.riscv.vwabdau.mask(
+    <vscale x 8 x i32> %vd,
+    <vscale x 8 x i16> %a,
+    <vscale x 8 x i16> %b,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 16 x i32> @vwabdau_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vwabdau.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i32> @llvm.riscv.vwabdau(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl, iXLen 0)
+  ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 16 x i32> @vwabdau_vv_mask_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    vwabdau.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i32> @llvm.riscv.vwabdau.mask(
+    <vscale x 16 x i32> %vd,
+    <vscale x 16 x i16> %a,
+    <vscale x 16 x i16> %b,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 0)
+  ret <vscale x 16 x i32> %res
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
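
The vwabdau tests mirror the vwabda tests above case-for-case; following the vabd/vabdu naming convention used elsewhere in this patch, the trailing "u" should mean the SEW-width sources are treated as unsigned when the difference is taken, with everything else (accumulator width, tu policy, masking) unchanged. Under that assumption, the two calls differ only in the intrinsic name:

  ; signed sources:
  %s = call <vscale x 4 x i32> @llvm.riscv.vwabda(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl, iXLen 0)
  ; unsigned sources:
  %u = call <vscale x 4 x i32> @llvm.riscv.vwabdau(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl, iXLen 0)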
