[clang] [llvm] [Clang][RISCV] Add Zvabd intrinsics (PR #180929)
Pengcheng Wang via cfe-commits
cfe-commits at lists.llvm.org
Wed Feb 11 04:23:34 PST 2026
https://github.com/wangpc-pp created https://github.com/llvm/llvm-project/pull/180929
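For reference, a minimal usage sketch of the new C builtins, mirroring the signatures exercised by the autogenerated tests in this patch. The wrapper names below are illustrative only, and the semantics (element-wise absolute difference, and widening absolute-difference accumulate) are assumed from the instruction names; the RUN lines in the tests show the experimental feature flag required to build code like this.

#include <riscv_vector.h>

// Element-wise absolute difference of two signed int8 vectors (vabd.vv).
vint8m1_t abs_diff_i8(vint8m1_t a, vint8m1_t b, size_t vl) {
  return __riscv_vabd_vv_i8m1(a, b, vl);
}

// Widening absolute-difference accumulate into a 16-bit accumulator (vwabdacc.vv).
vint16m2_t wabd_acc_i8(vint16m2_t acc, vint8m1_t a, vint8m1_t b, size_t vl) {
  return __riscv_vwabdacc_vv_i16m2(acc, a, b, vl);
}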
From 0c510206d29bd61e24c1fde8d4361e1a90a56bb8 Mon Sep 17 00:00:00 2001
From: Zhenxuan Sang <sang at bytedance.com>
Date: Wed, 11 Feb 2026 20:22:52 +0800
Subject: [PATCH] [𝘀𝗽𝗿] initial version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Created using spr 1.3.6-beta.1
---
clang/include/clang/Basic/riscv_vector.td | 18 +
.../zvabd/non-policy/non-overloaded/vabd_vv.c | 139 ++++++
.../non-policy/non-overloaded/vabdu_vv.c | 139 ++++++
.../zvabd/non-policy/non-overloaded/vabs_v.c | 229 ++++++++++
.../non-policy/non-overloaded/vwabdacc_vv.c | 119 ++++++
.../non-policy/non-overloaded/vwabdaccu_vv.c | 119 ++++++
.../zvabd/non-policy/overloaded/vabd_vv.c | 139 ++++++
.../zvabd/non-policy/overloaded/vabdu_vv.c | 139 ++++++
.../zvabd/non-policy/overloaded/vabs_v.c | 229 ++++++++++
.../zvabd/non-policy/overloaded/vwabdacc_vv.c | 119 ++++++
.../non-policy/overloaded/vwabdaccu_vv.c | 119 ++++++
.../zvabd/policy/non-overloaded/vabd_vv.c | 139 ++++++
.../zvabd/policy/non-overloaded/vabdu_vv.c | 139 ++++++
.../zvabd/policy/non-overloaded/vabs_v.c | 229 ++++++++++
.../zvabd/policy/non-overloaded/vwabdacc_vv.c | 119 ++++++
.../policy/non-overloaded/vwabdaccu_vv.c | 119 ++++++
.../zvabd/policy/overloaded/vabd_vv.c | 139 ++++++
.../zvabd/policy/overloaded/vabdu_vv.c | 139 ++++++
.../zvabd/policy/overloaded/vabs_v.c | 229 ++++++++++
.../zvabd/policy/overloaded/vwabdacc_vv.c | 119 ++++++
.../zvabd/policy/overloaded/vwabdaccu_vv.c | 119 ++++++
llvm/include/llvm/IR/IntrinsicsRISCV.td | 12 +
llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td | 6 +
llvm/test/CodeGen/RISCV/rvv/vabd.ll | 238 +++++++++++
llvm/test/CodeGen/RISCV/rvv/vabdu.ll | 238 +++++++++++
llvm/test/CodeGen/RISCV/rvv/vabs.ll | 400 ++++++++++++++++++
llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll | 202 +++++++++
llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll | 202 +++++++++
28 files changed, 4296 insertions(+)
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vabd.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vabdu.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vabs.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index c899dc70fc0b7..e25ecfe2c2d27 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2043,6 +2043,24 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vclmul : RVVInt64BinBuiltinSet;
defm vclmulh : RVVInt64BinBuiltinSet;
}
+
+ // zvabd
+ let RequiredFeatures = ["zvabd"] in {
+ defm vabs : RVVOutBuiltinSet<"vabs", "csil", [["v", "v", "vv"]]>;
+ defm vabd : RVVOutOp1BuiltinSet<"vabd", "cs", [["vv", "v", "vvv"]]>;
+ defm vabdu : RVVOutOp1BuiltinSet<"vabdu", "cs", [["vv", "Uv", "UvUvUv"]]>;
+ }
+}
+
+let UnMaskedPolicyScheme = HasPolicyOperand in {
+ let RequiredFeatures = ["zvabd"] in {
+ defm vwabdacc : RVVBuiltinSet<"vwabdacc", "cs",
+ [["vv", "w", "wwvv"]],
+ [-1, 1, 2]>;
+ defm vwabdaccu : RVVBuiltinSet<"vwabdaccu", "cs",
+ [["vv", "Uw", "UwUwUvUv"]],
+ [-1, 1, 2]>;
+ }
}
let UnMaskedPolicyScheme = HasPolicyOperand, HasMasked = false in {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..b97b39057306a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m8(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..838c4d98d63e3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m8(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
new file mode 100644
index 0000000000000..67751cb294739
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1(vint8m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2(vint8m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4(vint8m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8(vint8m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4(vint16mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2(vint16mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1(vint16m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2(vint16m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4(vint16m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8(vint16m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2(vint32mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1(vint32m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2(vint32m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4(vint32m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8(vint32m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1(vint64m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2(vint64m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4(vint64m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8(vint64m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m8(vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..a921f0868fd52
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16mf4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m8(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m8(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..28ef213e00c1b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16mf4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m8(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m8(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..39d4e33fcd907
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..850750a838b67
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
new file mode 100644
index 0000000000000..875828058741a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1(vint8m1_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2(vint8m2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4(vint8m4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8(vint8m8_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4(vint16mf4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2(vint16mf2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1(vint16m1_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2(vint16m2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4(vint16m4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8(vint16m8_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2(vint32mf2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1(vint32m1_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2(vint32m2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4(vint32m4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8(vint32m8_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1(vint64m1_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2(vint64m2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4(vint64m4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8(vint64m8_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..b5623160d751a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..6726fafde1451
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..404ea228d4ede
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..a2c7c22e86a93
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
new file mode 100644
index 0000000000000..cd44b41a06904
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m8_tu(vd, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..024e6b07fe26f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..bc6ac486ff71e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..0ac70e40ca8ec
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..0b3415e65e498
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
new file mode 100644
index 0000000000000..d992f1a41089e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..eb403a9d33a6e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..ea4f25ec0e121
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index f194ce99b52d1..d8f1d0a88c897 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1907,6 +1907,18 @@ let TargetPrefix = "riscv" in {
def int_riscv_vsm3me : RISCVBinaryAAXUnMasked;
} // TargetPrefix = "riscv"
+//===----------------------------------------------------------------------===//
+// Zvabd - Vector Absolute Difference
+//===----------------------------------------------------------------------===//
+let TargetPrefix = "riscv" in {
+
+ defm vabs : RISCVUnaryAA;
+ defm vabd : RISCVBinaryAAX;
+ defm vabdu : RISCVBinaryAAX;
+ defm vwabdacc : RISCVTernaryWide;
+ defm vwabdaccu : RISCVTernaryWide;
+} // TargetPrefix = "riscv"
+
//===----------------------------------------------------------------------===//
// Zvqdotq - Vector quad widening 4D Dot Product
//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 09c70ba72da29..ecd270c00f3f4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -86,6 +86,12 @@ defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU", ABDIntVectors>;
defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD", ABDIntVectors>;
defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU", ABDIntVectors>;
+defm : VPatUnaryV_V<"int_riscv_vabs", "PseudoVABS", AllIntegerVectors>;
+defm : VPatBinaryV_VV<"int_riscv_vabd", "PseudoVABD", ABDIntVectors>;
+defm : VPatBinaryV_VV<"int_riscv_vabdu", "PseudoVABDU", ABDIntVectors>;
+defm : VPatTernaryW_VV<"int_riscv_vwabdacc", "PseudoVWABDA", ABDAIntVectors>;
+defm : VPatTernaryW_VV<"int_riscv_vwabdaccu", "PseudoVWABDAU", ABDAIntVectors>;
+
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (abs (vti.Vector vti.RegClass:$rs2))),
(!cast<Instruction>("PseudoVABS_V_"#vti.LMul.MX)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabd.ll b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
new file mode 100644
index 0000000000000..24f242c64aab6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i8> @vabd_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabd.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+ ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vabd.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @vabd_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabd.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+ ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vabd.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @vabd_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabd.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+ ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vabd.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @vabd_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabd.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+ ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vabd.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @vabd_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabd.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+ ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vabd.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @vabd_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v12
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabd.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+ ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vabd.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @vabd_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v16
+; RV64-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabd.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+ ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vabd.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @vabd_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabd.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+ ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vabd.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @vabd_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabd.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+ ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vabd.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @vabd_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabd.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+ ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vabd.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @vabd_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabd.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vabd.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @vabd_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v12
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabd.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
+ ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vabd.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @vabd_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v16
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabd.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
+ ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vabd.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabdu.ll b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
new file mode 100644
index 0000000000000..da961efefe7f9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i8> @vabdu_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabdu.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+ ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vabdu.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @vabdu_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabdu.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+ ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vabdu.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @vabdu_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabdu.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+ ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vabdu.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @vabdu_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabdu.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+ ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vabdu.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @vabdu_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabdu.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+ ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vabdu.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @vabdu_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v12
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabdu.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+ ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vabdu.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @vabdu_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v16
+; RV64-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabdu.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+ ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vabdu.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @vabdu_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabdu.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+ ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vabdu.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @vabdu_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabdu.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+ ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vabdu.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @vabdu_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabdu.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+ ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vabdu.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @vabdu_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabdu.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vabdu.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @vabdu_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v12
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabdu.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
+ ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vabdu.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @vabdu_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v16
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabdu.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
+ ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vabdu.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabs.ll b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
new file mode 100644
index 0000000000000..7be228baa37ee
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
@@ -0,0 +1,400 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i8> @vabs_v_i8mf8(<vscale x 1 x i8> %a) {
+; RV32-LABEL: vabs_v_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabs.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, iXLen -1)
+ ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vabs.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @vabs_v_i8mf4(<vscale x 2 x i8> %a) {
+; RV32-LABEL: vabs_v_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabs.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, iXLen -1)
+ ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vabs.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @vabs_v_i8mf2(<vscale x 4 x i8> %a) {
+; RV32-LABEL: vabs_v_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabs.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, iXLen -1)
+ ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vabs.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @vabs_v_i8m1(<vscale x 8 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabs.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, iXLen -1)
+ ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vabs.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @vabs_v_i8m2(<vscale x 16 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabs.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, iXLen -1)
+ ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vabs.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @vabs_v_i8m4(<vscale x 32 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabs.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, iXLen -1)
+ ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vabs.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @vabs_v_i8m8(<vscale x 64 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabs.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, iXLen -1)
+ ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vabs.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @vabs_v_i16mf4(<vscale x 1 x i16> %a) {
+; RV32-LABEL: vabs_v_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabs.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, iXLen -1)
+ ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vabs.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @vabs_v_i16mf2(<vscale x 2 x i16> %a) {
+; RV32-LABEL: vabs_v_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabs.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, iXLen -1)
+ ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vabs.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @vabs_v_i16m1(<vscale x 4 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabs.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, iXLen -1)
+ ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vabs.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @vabs_v_i16m2(<vscale x 8 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabs.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, iXLen -1)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vabs.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @vabs_v_i16m4(<vscale x 16 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabs.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, iXLen -1)
+ ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vabs.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @vabs_v_i16m8(<vscale x 32 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabs.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, iXLen -1)
+ ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vabs.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @vabs_v_i32mf2(<vscale x 1 x i32> %a) {
+; RV32-LABEL: vabs_v_i32mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i32mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vabs.vscalex1xi32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %a, iXLen -1)
+ ret <vscale x 1 x i32> %res
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vabs.vscalex1xi32(<vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @vabs_v_i32m1(<vscale x 2 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i32m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vabs.vscalex2xi32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %a, iXLen -1)
+ ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vabs.vscalex2xi32(<vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @vabs_v_i32m2(<vscale x 4 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i32m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vabs.vscalex4xi32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1)
+ ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vabs.vscalex4xi32(<vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @vabs_v_i32m4(<vscale x 8 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i32m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vabs.vscalex8xi32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %a, iXLen -1)
+ ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vabs.vscalex8xi32(<vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @vabs_v_i32m8(<vscale x 16 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i32m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vabs.vscalex16xi32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %a, iXLen -1)
+ ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vabs.vscalex16xi32(<vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @vabs_v_i64m1(<vscale x 1 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i64m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i64> @llvm.riscv.vabs.vscalex1xi64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %a, iXLen -1)
+ ret <vscale x 1 x i64> %res
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vabs.vscalex1xi64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @vabs_v_i64m2(<vscale x 2 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i64m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.riscv.vabs.vscalex2xi64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %a, iXLen -1)
+ ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vabs.vscalex2xi64(<vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @vabs_v_i64m4(<vscale x 4 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i64m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i64> @llvm.riscv.vabs.vscalex4xi64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, iXLen -1)
+ ret <vscale x 4 x i64> %res
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vabs.vscalex4xi64(<vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @vabs_v_i64m8(<vscale x 8 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i64m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i64> @llvm.riscv.vabs.vscalex8xi64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %a, iXLen -1)
+ ret <vscale x 8 x i64> %res
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vabs.vscalex8xi64(<vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
new file mode 100644
index 0000000000000..f09babbddae0e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
@@ -0,0 +1,202 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i16> @vwabdacc_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwabdacc.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @vwabdacc_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwabdacc.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @vwabdacc_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwabdacc.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @vwabdacc_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV32-NEXT: vwabda.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV64-NEXT: vwabda.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwabdacc.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @vwabdacc_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwabdacc.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @vwabdacc_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwabdacc.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @vwabdacc_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i32> %res
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwabdacc.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @vwabdacc_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwabdacc.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @vwabdacc_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV32-NEXT: vwabda.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV64-NEXT: vwabda.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwabdacc.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @vwabdacc_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwabdacc.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @vwabdacc_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwabdacc.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
new file mode 100644
index 0000000000000..54c5e0a1b32a6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
@@ -0,0 +1,202 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i16> @vwabdaccu_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwabdaccu.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @vwabdaccu_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwabdaccu.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @vwabdaccu_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwabdaccu.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @vwabdaccu_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwabdaccu.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @vwabdaccu_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwabdaccu.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @vwabdaccu_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwabdaccu.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @vwabdaccu_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i32> %res
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwabdaccu.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @vwabdaccu_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwabdaccu.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @vwabdaccu_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwabdaccu.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @vwabdaccu_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwabdaccu.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @vwabdaccu_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwabdaccu.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
+