[clang] [llvm] [Clang][RISCV] Add Zvabd intrinsics (PR #180929)
Pengcheng Wang via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 12 20:51:04 PST 2026
https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/180929
>From 0c510206d29bd61e24c1fde8d4361e1a90a56bb8 Mon Sep 17 00:00:00 2001
From: Zhenxuan Sang <sang at bytedance.com>
Date: Wed, 11 Feb 2026 20:22:52 +0800
Subject: [PATCH 1/4] [𝘀𝗽𝗿] initial version
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Created using spr 1.3.6-beta.1
---
clang/include/clang/Basic/riscv_vector.td | 18 +
.../zvabd/non-policy/non-overloaded/vabd_vv.c | 139 ++++++
.../non-policy/non-overloaded/vabdu_vv.c | 139 ++++++
.../zvabd/non-policy/non-overloaded/vabs_v.c | 229 ++++++++++
.../non-policy/non-overloaded/vwabdacc_vv.c | 119 ++++++
.../non-policy/non-overloaded/vwabdaccu_vv.c | 119 ++++++
.../zvabd/non-policy/overloaded/vabd_vv.c | 139 ++++++
.../zvabd/non-policy/overloaded/vabdu_vv.c | 139 ++++++
.../zvabd/non-policy/overloaded/vabs_v.c | 229 ++++++++++
.../zvabd/non-policy/overloaded/vwabdacc_vv.c | 119 ++++++
.../non-policy/overloaded/vwabdaccu_vv.c | 119 ++++++
.../zvabd/policy/non-overloaded/vabd_vv.c | 139 ++++++
.../zvabd/policy/non-overloaded/vabdu_vv.c | 139 ++++++
.../zvabd/policy/non-overloaded/vabs_v.c | 229 ++++++++++
.../zvabd/policy/non-overloaded/vwabdacc_vv.c | 119 ++++++
.../policy/non-overloaded/vwabdaccu_vv.c | 119 ++++++
.../zvabd/policy/overloaded/vabd_vv.c | 139 ++++++
.../zvabd/policy/overloaded/vabdu_vv.c | 139 ++++++
.../zvabd/policy/overloaded/vabs_v.c | 229 ++++++++++
.../zvabd/policy/overloaded/vwabdacc_vv.c | 119 ++++++
.../zvabd/policy/overloaded/vwabdaccu_vv.c | 119 ++++++
llvm/include/llvm/IR/IntrinsicsRISCV.td | 12 +
llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td | 6 +
llvm/test/CodeGen/RISCV/rvv/vabd.ll | 238 +++++++++++
llvm/test/CodeGen/RISCV/rvv/vabdu.ll | 238 +++++++++++
llvm/test/CodeGen/RISCV/rvv/vabs.ll | 400 ++++++++++++++++++
llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll | 202 +++++++++
llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll | 202 +++++++++
28 files changed, 4296 insertions(+)
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vabd.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vabdu.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vabs.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
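
For reference, a minimal usage sketch of the new Zvabd intrinsics whose signatures are
exercised by the tests below (vabd.vv and vwabdacc.vv shown; vabdu/vwabdaccu are the
unsigned counterparts). Only the __riscv_* intrinsic calls and their argument orders come
from this patch; the function name, buffer layout, and LMUL choices are illustrative
assumptions, not part of the change.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative sketch: compute per-element |a[i] - b[i]| into `out`, and also
// accumulate the widened absolute differences into 16-bit lanes in `acc`,
// for up to one vector's worth (vl elements) of `n`.
void abd_example(const int8_t *a, const int8_t *b, int8_t *out,
                 int16_t *acc, size_t n) {
  size_t vl = __riscv_vsetvl_e8m1(n);
  vint8m1_t vs2 = __riscv_vle8_v_i8m1(a, vl);
  vint8m1_t vs1 = __riscv_vle8_v_i8m1(b, vl);

  // vabd.vv: signed absolute difference at the same element width.
  vint8m1_t vd = __riscv_vabd_vv_i8m1(vs2, vs1, vl);
  __riscv_vse8_v_i8m1(out, vd, vl);

  // vwabdacc.vv: widening absolute-difference accumulate; each 8-bit
  // |vs2 - vs1| is widened and added into the 16-bit accumulator lanes.
  vint16m2_t vacc = __riscv_vle16_v_i16m2(acc, vl);
  vacc = __riscv_vwabdacc_vv_i16m2(vacc, vs2, vs1, vl);
  __riscv_vse16_v_i16m2(acc, vacc, vl);
}
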
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index c899dc70fc0b7..e25ecfe2c2d27 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2043,6 +2043,24 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
defm vclmul : RVVInt64BinBuiltinSet;
defm vclmulh : RVVInt64BinBuiltinSet;
}
+
+ // zvabd
+ let RequiredFeatures = ["zvabd"] in {
+ defm vabs : RVVOutBuiltinSet<"vabs", "csil", [["v", "v", "vv"]]>;
+ defm vabd : RVVOutOp1BuiltinSet<"vabd", "cs", [["vv", "v", "vvv"]]>;
+ defm vabdu : RVVOutOp1BuiltinSet<"vabdu", "cs", [["vv", "Uv", "UvUvUv"]]>;
+ }
+}
+
+let UnMaskedPolicyScheme = HasPolicyOperand in {
+ let RequiredFeatures = ["zvabd"] in {
+ defm vwabdacc : RVVBuiltinSet<"vwabdacc", "cs",
+ [["vv", "w", "wwvv"]],
+ [-1, 1, 2]>;
+ defm vwabdaccu : RVVBuiltinSet<"vwabdaccu", "cs",
+ [["vv", "Uw", "UwUwUvUv"]],
+ [-1, 1, 2]>;
+ }
}
let UnMaskedPolicyScheme = HasPolicyOperand, HasMasked = false in {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..b97b39057306a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m8(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..838c4d98d63e3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m8(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
new file mode 100644
index 0000000000000..67751cb294739
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1(vint8m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2(vint8m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4(vint8m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8(vint8m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4(vint16mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2(vint16mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1(vint16m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2(vint16m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4(vint16m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8(vint16m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2(vint32mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1(vint32m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2(vint32m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4(vint32m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8(vint32m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1(vint64m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2(vint64m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4(vint64m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8(vint64m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m8(vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..a921f0868fd52
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16mf4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m8(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m8(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..28ef213e00c1b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16mf4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m8(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m8(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..39d4e33fcd907
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vabd(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..850750a838b67
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+ return __riscv_vabdu(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
new file mode 100644
index 0000000000000..875828058741a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1(vint8m1_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2(vint8m2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4(vint8m4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8(vint8m8_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4(vint16mf4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2(vint16mf2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1(vint16m1_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2(vint16m2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4(vint16m4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8(vint16m8_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2(vint32mf2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1(vint32m1_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2(vint32m2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4(vint32m4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8(vint32m8_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1(vint64m1_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2(vint64m2_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4(vint64m4_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8(vint64m8_t vs2, size_t vl) {
+ return __riscv_vabs(vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..b5623160d751a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..6726fafde1451
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..404ea228d4ede
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_i16m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..a2c7c22e86a93
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+ return __riscv_vabdu_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
new file mode 100644
index 0000000000000..cd44b41a06904
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i8m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i32m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_i64m8_tu(vd, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..024e6b07fe26f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_vv_i32m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..bc6ac486ff71e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..0ac70e40ca8ec
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..0b3415e65e498
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+ return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
new file mode 100644
index 0000000000000..d992f1a41089e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vl) {
+ return __riscv_vabs_tu(vd, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..eb403a9d33a6e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..ea4f25ec0e121
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index f194ce99b52d1..d8f1d0a88c897 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1907,6 +1907,18 @@ let TargetPrefix = "riscv" in {
 def int_riscv_vsm3me : RISCVBinaryAAXUnMasked;
 } // TargetPrefix = "riscv"
+//===----------------------------------------------------------------------===//
+// Zvabd - Vector Absolute Difference
+//===----------------------------------------------------------------------===//
+let TargetPrefix = "riscv" in {
+
+ defm vabs : RISCVUnaryAA;
+ defm vabd : RISCVBinaryAAX;
+ defm vabdu : RISCVBinaryAAX;
+ defm vwabdacc : RISCVTernaryWide;
+ defm vwabdaccu : RISCVTernaryWide;
+} // TargetPrefix = "riscv"
+
 //===----------------------------------------------------------------------===//
 // Zvqdotq - Vector quad widening 4D Dot Product
 //
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 09c70ba72da29..ecd270c00f3f4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -86,6 +86,12 @@ defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU", ABDIntVectors>;
 defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD", ABDIntVectors>;
 defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU", ABDIntVectors>;
+defm : VPatUnaryV_V<"int_riscv_vabs", "PseudoVABS", AllIntegerVectors>;
+defm : VPatBinaryV_VV<"int_riscv_vabd", "PseudoVABD", ABDIntVectors>;
+defm : VPatBinaryV_VV<"int_riscv_vabdu", "PseudoVABDU", ABDIntVectors>;
+defm : VPatTernaryW_VV<"int_riscv_vwabdacc", "PseudoVWABDA", ABDAIntVectors>;
+defm : VPatTernaryW_VV<"int_riscv_vwabdaccu", "PseudoVWABDAU", ABDAIntVectors>;
+
 foreach vti = AllIntegerVectors in {
 def : Pat<(vti.Vector (abs (vti.Vector vti.RegClass:$rs2))),
 (!cast<Instruction>("PseudoVABS_V_"#vti.LMul.MX)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabd.ll b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
new file mode 100644
index 0000000000000..24f242c64aab6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i8> @vabd_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabd.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+ ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vabd.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @vabd_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabd.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+ ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vabd.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @vabd_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabd.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+ ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vabd.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @vabd_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabd.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+ ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vabd.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @vabd_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabd.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+ ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vabd.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @vabd_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v12
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabd.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+ ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vabd.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @vabd_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i8m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v16
+; RV64-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabd.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+ ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vabd.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @vabd_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabd.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+ ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vabd.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @vabd_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabd.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+ ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vabd.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @vabd_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabd.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+ ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vabd.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @vabd_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabd.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vabd.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @vabd_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v12
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabd.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
+ ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vabd.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @vabd_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV32-NEXT: vabd.vv v8, v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabd_vv_i16m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV64-NEXT: vabd.vv v8, v8, v16
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabd.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
+ ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vabd.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabdu.ll b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
new file mode 100644
index 0000000000000..da961efefe7f9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i8> @vabdu_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabdu.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+ ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vabdu.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @vabdu_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabdu.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+ ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vabdu.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @vabdu_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabdu.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+ ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vabdu.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @vabdu_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabdu.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+ ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vabdu.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @vabdu_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabdu.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+ ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vabdu.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @vabdu_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v12
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabdu.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+ ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vabdu.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @vabdu_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i8m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v16
+; RV64-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabdu.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+ ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vabdu.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @vabdu_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabdu.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+ ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vabdu.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @vabdu_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabdu.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+ ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vabdu.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @vabdu_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v9
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabdu.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+ ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vabdu.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @vabdu_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabdu.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vabdu.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @vabdu_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v12
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v12
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabdu.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
+ ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vabdu.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @vabdu_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV32-NEXT: vabdu.vv v8, v8, v16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabdu_vv_i16m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV64-NEXT: vabdu.vv v8, v8, v16
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabdu.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
+ ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vabdu.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabs.ll b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
new file mode 100644
index 0000000000000..7be228baa37ee
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
@@ -0,0 +1,400 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i8> @vabs_v_i8mf8(<vscale x 1 x i8> %a) {
+; RV32-LABEL: vabs_v_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabs.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, iXLen -1)
+ ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vabs.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @vabs_v_i8mf4(<vscale x 2 x i8> %a) {
+; RV32-LABEL: vabs_v_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabs.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, iXLen -1)
+ ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vabs.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @vabs_v_i8mf2(<vscale x 4 x i8> %a) {
+; RV32-LABEL: vabs_v_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabs.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, iXLen -1)
+ ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vabs.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @vabs_v_i8m1(<vscale x 8 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabs.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, iXLen -1)
+ ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vabs.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @vabs_v_i8m2(<vscale x 16 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabs.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, iXLen -1)
+ ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vabs.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @vabs_v_i8m4(<vscale x 32 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabs.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, iXLen -1)
+ ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vabs.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @vabs_v_i8m8(<vscale x 64 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i8m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabs.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, iXLen -1)
+ ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vabs.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @vabs_v_i16mf4(<vscale x 1 x i16> %a) {
+; RV32-LABEL: vabs_v_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabs.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, iXLen -1)
+ ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vabs.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @vabs_v_i16mf2(<vscale x 2 x i16> %a) {
+; RV32-LABEL: vabs_v_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabs.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, iXLen -1)
+ ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vabs.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @vabs_v_i16m1(<vscale x 4 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabs.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, iXLen -1)
+ ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vabs.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @vabs_v_i16m2(<vscale x 8 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabs.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, iXLen -1)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vabs.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @vabs_v_i16m4(<vscale x 16 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabs.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, iXLen -1)
+ ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vabs.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @vabs_v_i16m8(<vscale x 32 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i16m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabs.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, iXLen -1)
+ ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vabs.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @vabs_v_i32mf2(<vscale x 1 x i32> %a) {
+; RV32-LABEL: vabs_v_i32mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i32mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vabs.vscalex1xi32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %a, iXLen -1)
+ ret <vscale x 1 x i32> %res
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vabs.vscalex1xi32(<vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @vabs_v_i32m1(<vscale x 2 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i32m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vabs.vscalex2xi32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %a, iXLen -1)
+ ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vabs.vscalex2xi32(<vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @vabs_v_i32m2(<vscale x 4 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i32m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vabs.vscalex4xi32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1)
+ ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vabs.vscalex4xi32(<vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @vabs_v_i32m4(<vscale x 8 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i32m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vabs.vscalex8xi32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %a, iXLen -1)
+ ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vabs.vscalex8xi32(<vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @vabs_v_i32m8(<vscale x 16 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i32m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vabs.vscalex16xi32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %a, iXLen -1)
+ ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vabs.vscalex16xi32(<vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @vabs_v_i64m1(<vscale x 1 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i64m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i64> @llvm.riscv.vabs.vscalex1xi64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %a, iXLen -1)
+ ret <vscale x 1 x i64> %res
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vabs.vscalex1xi64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @vabs_v_i64m2(<vscale x 2 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i64m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.riscv.vabs.vscalex2xi64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %a, iXLen -1)
+ ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vabs.vscalex2xi64(<vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @vabs_v_i64m4(<vscale x 4 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i64m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i64> @llvm.riscv.vabs.vscalex4xi64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, iXLen -1)
+ ret <vscale x 4 x i64> %res
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vabs.vscalex4xi64(<vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @vabs_v_i64m8(<vscale x 8 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32-NEXT: vabs.v v8, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vabs_v_i64m8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64-NEXT: vabs.v v8, v8
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i64> @llvm.riscv.vabs.vscalex8xi64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %a, iXLen -1)
+ ret <vscale x 8 x i64> %res
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vabs.vscalex8xi64(<vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
new file mode 100644
index 0000000000000..f09babbddae0e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
@@ -0,0 +1,202 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i16> @vwabdacc_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwabdacc.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @vwabdacc_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwabdacc.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @vwabdacc_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwabdacc.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @vwabdacc_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV32-NEXT: vwabda.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV64-NEXT: vwabda.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwabdacc.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @vwabdacc_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwabdacc.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @vwabdacc_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwabdacc.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @vwabdacc_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i32> %res
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwabdacc.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @vwabdacc_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwabdacc.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @vwabdacc_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV32-NEXT: vwabda.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV64-NEXT: vwabda.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwabdacc.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @vwabdacc_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwabdacc.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @vwabdacc_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdacc_vv_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwabdacc.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
new file mode 100644
index 0000000000000..54c5e0a1b32a6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
@@ -0,0 +1,202 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i16> @vwabdaccu_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwabdaccu.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @vwabdaccu_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwabdaccu.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @vwabdaccu_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwabdaccu.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @vwabdaccu_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwabdaccu.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @vwabdaccu_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwabdaccu.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @vwabdaccu_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwabdaccu.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @vwabdaccu_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i32> %res
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwabdaccu.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @vwabdaccu_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwabdaccu.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @vwabdaccu_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwabdaccu.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @vwabdaccu_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwabdaccu.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @vwabdaccu_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwabdaccu.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
+
>From b8534e0400b05000679937b401a48eb8cda09c63 Mon Sep 17 00:00:00 2001
From: Zhenxuan Sang <sang at bytedance.com>
Date: Thu, 12 Feb 2026 19:32:00 +0800
Subject: [PATCH 2/4] Address comments
Created using spr 1.3.6-beta.1
---
clang/include/clang/Basic/riscv_vector.td | 14 +-
.../zvabd/non-policy/non-overloaded/vabd_vv.c | 79 ++++---
.../non-policy/non-overloaded/vabdu_vv.c | 1 -
.../zvabd/non-policy/non-overloaded/vabs_v.c | 133 ++++++------
.../{vwabdacc_vv.c => vwabda_vv.c} | 89 ++++----
.../non-policy/non-overloaded/vwabdau_vv.c | 118 ++++++++++
.../zvabd/non-policy/overloaded/vabd_vv.c | 53 +++--
.../zvabd/non-policy/overloaded/vabdu_vv.c | 1 -
.../zvabd/non-policy/overloaded/vabs_v.c | 89 ++++----
.../overloaded/{vwabdacc_vv.c => vwabda_vv.c} | 89 ++++----
.../zvabd/non-policy/overloaded/vwabdau_vv.c | 118 ++++++++++
.../zvabd/policy/non-overloaded/vabd_vv.c | 79 ++++---
.../zvabd/policy/non-overloaded/vabdu_vv.c | 1 -
.../zvabd/policy/non-overloaded/vabs_v.c | 133 ++++++------
.../{vwabdacc_vv.c => vwabda_vv.c} | 89 ++++----
.../policy/non-overloaded/vwabdaccu_vv.c | 119 -----------
.../non-overloaded/vwabdau_vv.c} | 89 ++++----
.../zvabd/policy/overloaded/vabd_vv.c | 53 +++--
.../zvabd/policy/overloaded/vabdu_vv.c | 1 -
.../zvabd/policy/overloaded/vabs_v.c | 89 ++++----
.../overloaded/{vwabdacc_vv.c => vwabda_vv.c} | 89 ++++----
.../zvabd/policy/overloaded/vwabdaccu_vv.c | 119 -----------
.../overloaded/vwabdau_vv.c} | 89 ++++----
llvm/include/llvm/IR/IntrinsicsRISCV.td | 54 +++--
llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td | 4 +-
llvm/test/CodeGen/RISCV/rvv/vabd.ll | 53 ++---
llvm/test/CodeGen/RISCV/rvv/vabdu.ll | 53 ++---
llvm/test/CodeGen/RISCV/rvv/vabs.ll | 89 ++------
llvm/test/CodeGen/RISCV/rvv/vwabda.ll | 179 ++++++++++++++++
llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll | 202 ------------------
llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll | 202 ------------------
llvm/test/CodeGen/RISCV/rvv/vwabdau.ll | 179 ++++++++++++++++
32 files changed, 1302 insertions(+), 1447 deletions(-)
rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/{vwabdacc_vv.c => vwabda_vv.c} (58%)
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdau_vv.c
rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/{vwabdacc_vv.c => vwabda_vv.c} (59%)
create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdau_vv.c
rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/{vwabdacc_vv.c => vwabda_vv.c} (57%)
delete mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/{non-policy/non-overloaded/vwabdaccu_vv.c => policy/non-overloaded/vwabdau_vv.c} (56%)
rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/{vwabdacc_vv.c => vwabda_vv.c} (58%)
delete mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
rename clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/{non-policy/overloaded/vwabdaccu_vv.c => policy/overloaded/vwabdau_vv.c} (56%)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vwabda.ll
delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
delete mode 100644 llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/vwabdau.ll
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index e25ecfe2c2d27..25bc401db2068 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2046,20 +2046,18 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
// zvabd
let RequiredFeatures = ["zvabd"] in {
- defm vabs : RVVOutBuiltinSet<"vabs", "csil", [["v", "v", "vv"]]>;
- defm vabd : RVVOutOp1BuiltinSet<"vabd", "cs", [["vv", "v", "vvv"]]>;
+ defm vabs : RVVOutBuiltinSet<"vabs", "csil", [["v", "Uv", "Uvv"]]>;
+ defm vabd : RVVOutOp1BuiltinSet<"vabd", "cs", [["vv", "Uv", "Uvvv"]]>;
defm vabdu : RVVOutOp1BuiltinSet<"vabdu", "cs", [["vv", "Uv", "UvUvUv"]]>;
}
}
let UnMaskedPolicyScheme = HasPolicyOperand in {
let RequiredFeatures = ["zvabd"] in {
- defm vwabdacc : RVVBuiltinSet<"vwabdacc", "cs",
- [["vv", "w", "wwvv"]],
- [-1, 1, 2]>;
- defm vwabdaccu : RVVBuiltinSet<"vwabdaccu", "cs",
- [["vv", "Uw", "UwUwUvUv"]],
- [-1, 1, 2]>;
+ defm vwabda : RVVOutOp1Op2BuiltinSet<"vwabda", "cs",
+ [["vv", "Uw", "UwUwvv"]]>;
+ defm vwabdau : RVVOutOp1Op2BuiltinSet<"vwabdau", "cs",
+ [["vv", "Uw", "UwUwUvUv"]]>;
}
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
index b97b39057306a..4c844315ba231 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
@@ -7,133 +7,132 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vint8mf8_t test_vabd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8mf8(vs2, vs1, vl);
+vuint8mf8_t test_vabd_vv_u8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8mf8(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vint8mf4_t test_vabd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8mf4(vs2, vs1, vl);
+vuint8mf4_t test_vabd_vv_u8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8mf4(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vint8mf2_t test_vabd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8mf2(vs2, vs1, vl);
+vuint8mf2_t test_vabd_vv_u8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8mf2(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vint8m1_t test_vabd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8m1(vs2, vs1, vl);
+vuint8m1_t test_vabd_vv_u8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8m1(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vint8m2_t test_vabd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8m2(vs2, vs1, vl);
+vuint8m2_t test_vabd_vv_u8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8m2(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vint8m4_t test_vabd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8m4(vs2, vs1, vl);
+vuint8m4_t test_vabd_vv_u8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8m4(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vint8m8_t test_vabd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8m8(vs2, vs1, vl);
+vuint8m8_t test_vabd_vv_u8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8m8(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vabd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16mf4(vs2, vs1, vl);
+vuint16mf4_t test_vabd_vv_u16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16mf4(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vabd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16mf2(vs2, vs1, vl);
+vuint16mf2_t test_vabd_vv_u16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16mf2(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vabd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16m1(vs2, vs1, vl);
+vuint16m1_t test_vabd_vv_u16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16m1(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vabd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16m2(vs2, vs1, vl);
+vuint16m2_t test_vabd_vv_u16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16m2(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vabd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16m4(vs2, vs1, vl);
+vuint16m4_t test_vabd_vv_u16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16m4(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vabd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16m8(vs2, vs1, vl);
+vuint16m8_t test_vabd_vv_u16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16m8(vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
index 838c4d98d63e3..5583fed5f7995 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
@@ -136,4 +136,3 @@ vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
return __riscv_vabdu_vv_u16m8(vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
index 67751cb294739..8a0e9cebc13f7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
@@ -7,223 +7,222 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vint8mf8_t test_vabs_v_i8mf8(vint8mf8_t vs2, size_t vl) {
- return __riscv_vabs_v_i8mf8(vs2, vl);
+vuint8mf8_t test_vabs_v_u8mf8(vint8mf8_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8mf8(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vint8mf4_t test_vabs_v_i8mf4(vint8mf4_t vs2, size_t vl) {
- return __riscv_vabs_v_i8mf4(vs2, vl);
+vuint8mf4_t test_vabs_v_u8mf4(vint8mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8mf4(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vint8mf2_t test_vabs_v_i8mf2(vint8mf2_t vs2, size_t vl) {
- return __riscv_vabs_v_i8mf2(vs2, vl);
+vuint8mf2_t test_vabs_v_u8mf2(vint8mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8mf2(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vint8m1_t test_vabs_v_i8m1(vint8m1_t vs2, size_t vl) {
- return __riscv_vabs_v_i8m1(vs2, vl);
+vuint8m1_t test_vabs_v_u8m1(vint8m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8m1(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vint8m2_t test_vabs_v_i8m2(vint8m2_t vs2, size_t vl) {
- return __riscv_vabs_v_i8m2(vs2, vl);
+vuint8m2_t test_vabs_v_u8m2(vint8m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8m2(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vint8m4_t test_vabs_v_i8m4(vint8m4_t vs2, size_t vl) {
- return __riscv_vabs_v_i8m4(vs2, vl);
+vuint8m4_t test_vabs_v_u8m4(vint8m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8m4(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vint8m8_t test_vabs_v_i8m8(vint8m8_t vs2, size_t vl) {
- return __riscv_vabs_v_i8m8(vs2, vl);
+vuint8m8_t test_vabs_v_u8m8(vint8m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8m8(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vabs_v_i16mf4(vint16mf4_t vs2, size_t vl) {
- return __riscv_vabs_v_i16mf4(vs2, vl);
+vuint16mf4_t test_vabs_v_u16mf4(vint16mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16mf4(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vabs_v_i16mf2(vint16mf2_t vs2, size_t vl) {
- return __riscv_vabs_v_i16mf2(vs2, vl);
+vuint16mf2_t test_vabs_v_u16mf2(vint16mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16mf2(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vabs_v_i16m1(vint16m1_t vs2, size_t vl) {
- return __riscv_vabs_v_i16m1(vs2, vl);
+vuint16m1_t test_vabs_v_u16m1(vint16m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16m1(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vabs_v_i16m2(vint16m2_t vs2, size_t vl) {
- return __riscv_vabs_v_i16m2(vs2, vl);
+vuint16m2_t test_vabs_v_u16m2(vint16m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16m2(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vabs_v_i16m4(vint16m4_t vs2, size_t vl) {
- return __riscv_vabs_v_i16m4(vs2, vl);
+vuint16m4_t test_vabs_v_u16m4(vint16m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16m4(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vabs_v_i16m8(vint16m8_t vs2, size_t vl) {
- return __riscv_vabs_v_i16m8(vs2, vl);
+vuint16m8_t test_vabs_v_u16m8(vint16m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16m8(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vint32mf2_t test_vabs_v_i32mf2(vint32mf2_t vs2, size_t vl) {
- return __riscv_vabs_v_i32mf2(vs2, vl);
+vuint32mf2_t test_vabs_v_u32mf2(vint32mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u32mf2(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vint32m1_t test_vabs_v_i32m1(vint32m1_t vs2, size_t vl) {
- return __riscv_vabs_v_i32m1(vs2, vl);
+vuint32m1_t test_vabs_v_u32m1(vint32m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_u32m1(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vint32m2_t test_vabs_v_i32m2(vint32m2_t vs2, size_t vl) {
- return __riscv_vabs_v_i32m2(vs2, vl);
+vuint32m2_t test_vabs_v_u32m2(vint32m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u32m2(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vint32m4_t test_vabs_v_i32m4(vint32m4_t vs2, size_t vl) {
- return __riscv_vabs_v_i32m4(vs2, vl);
+vuint32m4_t test_vabs_v_u32m4(vint32m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u32m4(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vint32m8_t test_vabs_v_i32m8(vint32m8_t vs2, size_t vl) {
- return __riscv_vabs_v_i32m8(vs2, vl);
+vuint32m8_t test_vabs_v_u32m8(vint32m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_u32m8(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vint64m1_t test_vabs_v_i64m1(vint64m1_t vs2, size_t vl) {
- return __riscv_vabs_v_i64m1(vs2, vl);
+vuint64m1_t test_vabs_v_u64m1(vint64m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_u64m1(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vint64m2_t test_vabs_v_i64m2(vint64m2_t vs2, size_t vl) {
- return __riscv_vabs_v_i64m2(vs2, vl);
+vuint64m2_t test_vabs_v_u64m2(vint64m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u64m2(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vint64m4_t test_vabs_v_i64m4(vint64m4_t vs2, size_t vl) {
- return __riscv_vabs_v_i64m4(vs2, vl);
+vuint64m4_t test_vabs_v_u64m4(vint64m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u64m4(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vint64m8_t test_vabs_v_i64m8(vint64m8_t vs2, size_t vl) {
- return __riscv_vabs_v_i64m8(vs2, vl);
+vuint64m8_t test_vabs_v_u64m8(vint64m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_u64m8(vs2, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabda_vv.c
similarity index 58%
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabda_vv.c
index a921f0868fd52..c14d7c1e58eba 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabda_vv.c
@@ -7,113 +7,112 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vwabdacc_vv_i8mf8(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16mf4(vd, vs2, vs1, vl);
+vuint16mf4_t test_vwabda_vv_u16mf4(vuint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16mf4(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vwabdacc_vv_i8mf4(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16mf2(vd, vs2, vs1, vl);
+vuint16mf2_t test_vwabda_vv_u16mf2(vuint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16mf2(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vwabdacc_vv_i8mf2(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16m1(vd, vs2, vs1, vl);
+vuint16m1_t test_vwabda_vv_u16m1(vuint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16m1(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vwabdacc_vv_i8m1(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16m2(vd, vs2, vs1, vl);
+vuint16m2_t test_vwabda_vv_u16m2(vuint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16m2(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vwabdacc_vv_i8m2(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16m4(vd, vs2, vs1, vl);
+vuint16m4_t test_vwabda_vv_u16m4(vuint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16m4(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vwabdacc_vv_i8m4(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16m8(vd, vs2, vs1, vl);
+vuint16m8_t test_vwabda_vv_u16m8(vuint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16m8(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vint32mf2_t test_vwabdacc_vv_i16mf4(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i32mf2(vd, vs2, vs1, vl);
+vuint32mf2_t test_vwabda_vv_u32mf2(vuint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u32mf2(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vint32m1_t test_vwabdacc_vv_i16mf2(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i32m1(vd, vs2, vs1, vl);
+vuint32m1_t test_vwabda_vv_u32m1(vuint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u32m1(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vint32m2_t test_vwabdacc_vv_i16m1(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i32m2(vd, vs2, vs1, vl);
+vuint32m2_t test_vwabda_vv_u32m2(vuint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u32m2(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vint32m4_t test_vwabdacc_vv_i16m2(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i32m4(vd, vs2, vs1, vl);
+vuint32m4_t test_vwabda_vv_u32m4(vuint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u32m4(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vint32m8_t test_vwabdacc_vv_i16m4(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i32m8(vd, vs2, vs1, vl);
+vuint32m8_t test_vwabda_vv_u32m8(vuint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u32m8(vd, vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdau_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdau_vv.c
new file mode 100644
index 0000000000000..d3d3e97980291
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdau_vv.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16mf4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16m8(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u32m8(vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
index 39d4e33fcd907..32f51b151ba76 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
@@ -7,133 +7,132 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vint8mf8_t test_vabd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+vuint8mf8_t test_vabd_vv_u8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vint8mf4_t test_vabd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+vuint8mf4_t test_vabd_vv_u8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vint8mf2_t test_vabd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+vuint8mf2_t test_vabd_vv_u8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vint8m1_t test_vabd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+vuint8m1_t test_vabd_vv_u8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vint8m2_t test_vabd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+vuint8m2_t test_vabd_vv_u8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vint8m4_t test_vabd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+vuint8m4_t test_vabd_vv_u8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vint8m8_t test_vabd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+vuint8m8_t test_vabd_vv_u8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vabd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+vuint16mf4_t test_vabd_vv_u16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vabd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+vuint16mf2_t test_vabd_vv_u16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vabd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+vuint16m1_t test_vabd_vv_u16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vabd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+vuint16m2_t test_vabd_vv_u16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vabd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+vuint16m4_t test_vabd_vv_u16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vabd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+vuint16m8_t test_vabd_vv_u16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
return __riscv_vabd(vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
index 850750a838b67..e649ede420f27 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
@@ -136,4 +136,3 @@ vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
return __riscv_vabdu(vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
index 875828058741a..a4864a0f20213 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
@@ -7,223 +7,222 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vint8mf8_t test_vabs_v_i8mf8(vint8mf8_t vs2, size_t vl) {
+vuint8mf8_t test_vabs_v_u8mf8(vint8mf8_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vint8mf4_t test_vabs_v_i8mf4(vint8mf4_t vs2, size_t vl) {
+vuint8mf4_t test_vabs_v_u8mf4(vint8mf4_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vint8mf2_t test_vabs_v_i8mf2(vint8mf2_t vs2, size_t vl) {
+vuint8mf2_t test_vabs_v_u8mf2(vint8mf2_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vint8m1_t test_vabs_v_i8m1(vint8m1_t vs2, size_t vl) {
+vuint8m1_t test_vabs_v_u8m1(vint8m1_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vint8m2_t test_vabs_v_i8m2(vint8m2_t vs2, size_t vl) {
+vuint8m2_t test_vabs_v_u8m2(vint8m2_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vint8m4_t test_vabs_v_i8m4(vint8m4_t vs2, size_t vl) {
+vuint8m4_t test_vabs_v_u8m4(vint8m4_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vint8m8_t test_vabs_v_i8m8(vint8m8_t vs2, size_t vl) {
+vuint8m8_t test_vabs_v_u8m8(vint8m8_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vabs_v_i16mf4(vint16mf4_t vs2, size_t vl) {
+vuint16mf4_t test_vabs_v_u16mf4(vint16mf4_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vabs_v_i16mf2(vint16mf2_t vs2, size_t vl) {
+vuint16mf2_t test_vabs_v_u16mf2(vint16mf2_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vabs_v_i16m1(vint16m1_t vs2, size_t vl) {
+vuint16m1_t test_vabs_v_u16m1(vint16m1_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vabs_v_i16m2(vint16m2_t vs2, size_t vl) {
+vuint16m2_t test_vabs_v_u16m2(vint16m2_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vabs_v_i16m4(vint16m4_t vs2, size_t vl) {
+vuint16m4_t test_vabs_v_u16m4(vint16m4_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vabs_v_i16m8(vint16m8_t vs2, size_t vl) {
+vuint16m8_t test_vabs_v_u16m8(vint16m8_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vint32mf2_t test_vabs_v_i32mf2(vint32mf2_t vs2, size_t vl) {
+vuint32mf2_t test_vabs_v_u32mf2(vint32mf2_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vint32m1_t test_vabs_v_i32m1(vint32m1_t vs2, size_t vl) {
+vuint32m1_t test_vabs_v_u32m1(vint32m1_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vint32m2_t test_vabs_v_i32m2(vint32m2_t vs2, size_t vl) {
+vuint32m2_t test_vabs_v_u32m2(vint32m2_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vint32m4_t test_vabs_v_i32m4(vint32m4_t vs2, size_t vl) {
+vuint32m4_t test_vabs_v_u32m4(vint32m4_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vint32m8_t test_vabs_v_i32m8(vint32m8_t vs2, size_t vl) {
+vuint32m8_t test_vabs_v_u32m8(vint32m8_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vint64m1_t test_vabs_v_i64m1(vint64m1_t vs2, size_t vl) {
+vuint64m1_t test_vabs_v_u64m1(vint64m1_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vint64m2_t test_vabs_v_i64m2(vint64m2_t vs2, size_t vl) {
+vuint64m2_t test_vabs_v_u64m2(vint64m2_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vint64m4_t test_vabs_v_i64m4(vint64m4_t vs2, size_t vl) {
+vuint64m4_t test_vabs_v_u64m4(vint64m4_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vint64m8_t test_vabs_v_i64m8(vint64m8_t vs2, size_t vl) {
+vuint64m8_t test_vabs_v_u64m8(vint64m8_t vs2, size_t vl) {
return __riscv_vabs(vs2, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabda_vv.c
similarity index 59%
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabda_vv.c
index b5623160d751a..a0d867c5bf47a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabda_vv.c
@@ -7,113 +7,112 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vwabdacc_vv_i8mf8(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint16mf4_t test_vwabda_vv_u16mf4(vuint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vwabdacc_vv_i8mf4(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint16mf2_t test_vwabda_vv_u16mf2(vuint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vwabdacc_vv_i8mf2(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint16m1_t test_vwabda_vv_u16m1(vuint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vwabdacc_vv_i8m1(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint16m2_t test_vwabda_vv_u16m2(vuint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vwabdacc_vv_i8m2(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint16m4_t test_vwabda_vv_u16m4(vuint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vwabdacc_vv_i8m4(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint16m8_t test_vwabda_vv_u16m8(vuint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vint32mf2_t test_vwabdacc_vv_i16mf4(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint32mf2_t test_vwabda_vv_u32mf2(vuint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vint32m1_t test_vwabdacc_vv_i16mf2(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint32m1_t test_vwabda_vv_u32m1(vuint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vint32m2_t test_vwabdacc_vv_i16m1(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint32m2_t test_vwabda_vv_u32m2(vuint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vint32m4_t test_vwabdacc_vv_i16m2(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint32m4_t test_vwabda_vv_u32m4(vuint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vint32m8_t test_vwabdacc_vv_i16m4(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
- return __riscv_vwabdacc(vd, vs2, vs1, vl);
+vuint32m8_t test_vwabda_vv_u32m8(vuint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabda(vd, vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdau_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdau_vv.c
new file mode 100644
index 0000000000000..4711cd07d9ef1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdau_vv.c
@@ -0,0 +1,118 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN: -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdau_vv_u16mf4(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdau_vv_u16mf2(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdau_vv_u16m1(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdau_vv_u16m2(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdau_vv_u16m4(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdau_vv_u16m8(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdau_vv_u32mf2(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdau_vv_u32m1(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdau_vv_u32m2(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdau_vv_u32m4(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdau_vv_u32m8(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdau(vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
index 404ea228d4ede..0200da53b2b1e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
@@ -7,133 +7,132 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vint8mf8_t test_vabd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8mf8_tu(vd, vs2, vs1, vl);
+vuint8mf8_t test_vabd_vv_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8mf8_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vint8mf4_t test_vabd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8mf4_tu(vd, vs2, vs1, vl);
+vuint8mf4_t test_vabd_vv_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8mf4_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vint8mf2_t test_vabd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8mf2_tu(vd, vs2, vs1, vl);
+vuint8mf2_t test_vabd_vv_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8mf2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vint8m1_t test_vabd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8m1_tu(vd, vs2, vs1, vl);
+vuint8m1_t test_vabd_vv_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8m1_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vint8m2_t test_vabd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8m2_tu(vd, vs2, vs1, vl);
+vuint8m2_t test_vabd_vv_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8m2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vint8m4_t test_vabd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8m4_tu(vd, vs2, vs1, vl);
+vuint8m4_t test_vabd_vv_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8m4_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vint8m8_t test_vabd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
- return __riscv_vabd_vv_i8m8_tu(vd, vs2, vs1, vl);
+vuint8m8_t test_vabd_vv_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u8m8_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vabd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16mf4_tu(vd, vs2, vs1, vl);
+vuint16mf4_t test_vabd_vv_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16mf4_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vabd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16mf2_tu(vd, vs2, vs1, vl);
+vuint16mf2_t test_vabd_vv_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16mf2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vabd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16m1_tu(vd, vs2, vs1, vl);
+vuint16m1_t test_vabd_vv_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16m1_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vabd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16m2_tu(vd, vs2, vs1, vl);
+vuint16m2_t test_vabd_vv_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16m2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vabd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16m4_tu(vd, vs2, vs1, vl);
+vuint16m4_t test_vabd_vv_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16m4_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vabd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
- return __riscv_vabd_vv_i16m8_tu(vd, vs2, vs1, vl);
+vuint16m8_t test_vabd_vv_u16m8_tu(vuint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+ return __riscv_vabd_vv_u16m8_tu(vd, vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
index a2c7c22e86a93..2f34320c03290 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
@@ -136,4 +136,3 @@ vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t
vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
return __riscv_vabdu_vv_u16m8_tu(vd, vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
index cd44b41a06904..00a3ca06f1439 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
@@ -7,223 +7,222 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vint8mf8_t test_vabs_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
- return __riscv_vabs_v_i8mf8_tu(vd, vs2, vl);
+vuint8mf8_t test_vabs_v_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8mf8_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vint8mf4_t test_vabs_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
- return __riscv_vabs_v_i8mf4_tu(vd, vs2, vl);
+vuint8mf4_t test_vabs_v_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8mf4_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vint8mf2_t test_vabs_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
- return __riscv_vabs_v_i8mf2_tu(vd, vs2, vl);
+vuint8mf2_t test_vabs_v_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8mf2_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vint8m1_t test_vabs_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vl) {
- return __riscv_vabs_v_i8m1_tu(vd, vs2, vl);
+vuint8m1_t test_vabs_v_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8m1_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vint8m2_t test_vabs_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vl) {
- return __riscv_vabs_v_i8m2_tu(vd, vs2, vl);
+vuint8m2_t test_vabs_v_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8m2_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vint8m4_t test_vabs_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vl) {
- return __riscv_vabs_v_i8m4_tu(vd, vs2, vl);
+vuint8m4_t test_vabs_v_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8m4_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vint8m8_t test_vabs_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vl) {
- return __riscv_vabs_v_i8m8_tu(vd, vs2, vl);
+vuint8m8_t test_vabs_v_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_u8m8_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vabs_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vl) {
- return __riscv_vabs_v_i16mf4_tu(vd, vs2, vl);
+vuint16mf4_t test_vabs_v_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16mf4_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vabs_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vl) {
- return __riscv_vabs_v_i16mf2_tu(vd, vs2, vl);
+vuint16mf2_t test_vabs_v_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16mf2_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vabs_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vl) {
- return __riscv_vabs_v_i16m1_tu(vd, vs2, vl);
+vuint16m1_t test_vabs_v_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16m1_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vabs_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vl) {
- return __riscv_vabs_v_i16m2_tu(vd, vs2, vl);
+vuint16m2_t test_vabs_v_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16m2_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vabs_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vl) {
- return __riscv_vabs_v_i16m4_tu(vd, vs2, vl);
+vuint16m4_t test_vabs_v_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16m4_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vabs_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vl) {
- return __riscv_vabs_v_i16m8_tu(vd, vs2, vl);
+vuint16m8_t test_vabs_v_u16m8_tu(vuint16m8_t vd, vint16m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_u16m8_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vint32mf2_t test_vabs_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vl) {
- return __riscv_vabs_v_i32mf2_tu(vd, vs2, vl);
+vuint32mf2_t test_vabs_v_u32mf2_tu(vuint32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u32mf2_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vint32m1_t test_vabs_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vl) {
- return __riscv_vabs_v_i32m1_tu(vd, vs2, vl);
+vuint32m1_t test_vabs_v_u32m1_tu(vuint32m1_t vd, vint32m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_u32m1_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vint32m2_t test_vabs_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vl) {
- return __riscv_vabs_v_i32m2_tu(vd, vs2, vl);
+vuint32m2_t test_vabs_v_u32m2_tu(vuint32m2_t vd, vint32m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u32m2_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vint32m4_t test_vabs_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vl) {
- return __riscv_vabs_v_i32m4_tu(vd, vs2, vl);
+vuint32m4_t test_vabs_v_u32m4_tu(vuint32m4_t vd, vint32m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u32m4_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vint32m8_t test_vabs_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vl) {
- return __riscv_vabs_v_i32m8_tu(vd, vs2, vl);
+vuint32m8_t test_vabs_v_u32m8_tu(vuint32m8_t vd, vint32m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_u32m8_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vint64m1_t test_vabs_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vl) {
- return __riscv_vabs_v_i64m1_tu(vd, vs2, vl);
+vuint64m1_t test_vabs_v_u64m1_tu(vuint64m1_t vd, vint64m1_t vs2, size_t vl) {
+ return __riscv_vabs_v_u64m1_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vint64m2_t test_vabs_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vl) {
- return __riscv_vabs_v_i64m2_tu(vd, vs2, vl);
+vuint64m2_t test_vabs_v_u64m2_tu(vuint64m2_t vd, vint64m2_t vs2, size_t vl) {
+ return __riscv_vabs_v_u64m2_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vint64m4_t test_vabs_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vl) {
- return __riscv_vabs_v_i64m4_tu(vd, vs2, vl);
+vuint64m4_t test_vabs_v_u64m4_tu(vuint64m4_t vd, vint64m4_t vs2, size_t vl) {
+ return __riscv_vabs_v_u64m4_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vint64m8_t test_vabs_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vl) {
- return __riscv_vabs_v_i64m8_tu(vd, vs2, vl);
+vuint64m8_t test_vabs_v_u64m8_tu(vuint64m8_t vd, vint64m8_t vs2, size_t vl) {
+ return __riscv_vabs_v_u64m8_tu(vd, vs2, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabda_vv.c
similarity index 57%
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabda_vv.c
index 024e6b07fe26f..b2cee2ea37e41 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabda_vv.c
@@ -7,113 +7,112 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vwabdacc_vv_i8mf8_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16mf4_tu(vd, vs2, vs1, vl);
+vuint16mf4_t test_vwabda_vv_u16mf4_tu(vuint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16mf4_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vwabdacc_vv_i8mf4_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16mf2_tu(vd, vs2, vs1, vl);
+vuint16mf2_t test_vwabda_vv_u16mf2_tu(vuint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16mf2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vwabdacc_vv_i8mf2_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16m1_tu(vd, vs2, vs1, vl);
+vuint16m1_t test_vwabda_vv_u16m1_tu(vuint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16m1_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vwabdacc_vv_i8m1_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16m2_tu(vd, vs2, vs1, vl);
+vuint16m2_t test_vwabda_vv_u16m2_tu(vuint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16m2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vwabdacc_vv_i8m2_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16m4_tu(vd, vs2, vs1, vl);
+vuint16m4_t test_vwabda_vv_u16m4_tu(vuint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16m4_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vwabdacc_vv_i8m4_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i16m8_tu(vd, vs2, vs1, vl);
+vuint16m8_t test_vwabda_vv_u16m8_tu(vuint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u16m8_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vint32mf2_t test_vwabdacc_vv_i16mf4_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i32mf2_tu(vd, vs2, vs1, vl);
+vuint32mf2_t test_vwabda_vv_u32mf2_tu(vuint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u32mf2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vint32m1_t test_vwabdacc_vv_i16mf2_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i32m1_tu(vd, vs2, vs1, vl);
+vuint32m1_t test_vwabda_vv_u32m1_tu(vuint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u32m1_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vint32m2_t test_vwabdacc_vv_i16m1_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i32m2_tu(vd, vs2, vs1, vl);
+vuint32m2_t test_vwabda_vv_u32m2_tu(vuint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u32m2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vint32m4_t test_vwabdacc_vv_i16m2_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i32m4_tu(vd, vs2, vs1, vl);
+vuint32m4_t test_vwabda_vv_u32m4_tu(vuint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u32m4_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vint32m8_t test_vwabdacc_vv_i16m4_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
- return __riscv_vwabdacc_vv_i32m8_tu(vd, vs2, vs1, vl);
+vuint32m8_t test_vwabda_vv_u32m8_tu(vuint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabda_vv_u32m8_tu(vd, vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
deleted file mode 100644
index bc6ac486ff71e..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
-// RUN: -disable-O0-optnone \
-// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
-// RUN: FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8_tu
-// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
-//
-vuint16mf4_t test_vwabdaccu_vv_u8mf8_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16mf4_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4_tu
-// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
-//
-vuint16mf2_t test_vwabdaccu_vv_u8mf4_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16mf2_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2_tu
-// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
-//
-vuint16m1_t test_vwabdaccu_vv_u8mf2_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16m1_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1_tu
-// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
-//
-vuint16m2_t test_vwabdaccu_vv_u8m1_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16m2_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2_tu
-// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
-//
-vuint16m4_t test_vwabdaccu_vv_u8m2_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16m4_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4_tu
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
-//
-vuint16m8_t test_vwabdaccu_vv_u8m4_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16m8_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4_tu
-// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
-//
-vuint32mf2_t test_vwabdaccu_vv_u16mf4_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u32mf2_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2_tu
-// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
-//
-vuint32m1_t test_vwabdaccu_vv_u16mf2_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u32m1_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1_tu
-// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
-//
-vuint32m2_t test_vwabdaccu_vv_u16m1_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u32m2_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2_tu
-// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
-//
-vuint32m4_t test_vwabdaccu_vv_u16m2_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u32m4_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4_tu
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
-//
-vuint32m8_t test_vwabdaccu_vv_u16m4_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u32m8_tu(vd, vs2, vs1, vl);
-}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdau_vv.c
similarity index 56%
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdau_vv.c
index 28ef213e00c1b..33bc868391226 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdau_vv.c
@@ -7,113 +7,112 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vwabdaccu_vv_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16mf4(vd, vs2, vs1, vl);
+vuint16mf4_t test_vwabdau_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16mf4_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vwabdaccu_vv_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16mf2(vd, vs2, vs1, vl);
+vuint16mf2_t test_vwabdau_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16mf2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vwabdaccu_vv_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16m1(vd, vs2, vs1, vl);
+vuint16m1_t test_vwabdau_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16m1_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vwabdaccu_vv_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16m2(vd, vs2, vs1, vl);
+vuint16m2_t test_vwabdau_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16m2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vwabdaccu_vv_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16m4(vd, vs2, vs1, vl);
+vuint16m4_t test_vwabdau_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16m4_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vwabdaccu_vv_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u16m8(vd, vs2, vs1, vl);
+vuint16m8_t test_vwabdau_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u16m8_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vwabdaccu_vv_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u32mf2(vd, vs2, vs1, vl);
+vuint32mf2_t test_vwabdau_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u32mf2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vwabdaccu_vv_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u32m1(vd, vs2, vs1, vl);
+vuint32m1_t test_vwabdau_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u32m1_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vwabdaccu_vv_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u32m2(vd, vs2, vs1, vl);
+vuint32m2_t test_vwabdau_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u32m2_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vwabdaccu_vv_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u32m4(vd, vs2, vs1, vl);
+vuint32m4_t test_vwabdau_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u32m4_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vwabdaccu_vv_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_vv_u32m8(vd, vs2, vs1, vl);
+vuint32m8_t test_vwabdau_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdau_vv_u32m8_tu(vd, vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
index 0ac70e40ca8ec..d2e24c4807ba1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
@@ -7,133 +7,132 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vint8mf8_t test_vabd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+vuint8mf8_t test_vabd_vv_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vint8mf4_t test_vabd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+vuint8mf4_t test_vabd_vv_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vint8mf2_t test_vabd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+vuint8mf2_t test_vabd_vv_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vint8m1_t test_vabd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+vuint8m1_t test_vabd_vv_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vint8m2_t test_vabd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+vuint8m2_t test_vabd_vv_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vint8m4_t test_vabd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+vuint8m4_t test_vabd_vv_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vint8m8_t test_vabd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+vuint8m8_t test_vabd_vv_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vabd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+vuint16mf4_t test_vabd_vv_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vabd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+vuint16mf2_t test_vabd_vv_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vabd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+vuint16m1_t test_vabd_vv_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vabd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+vuint16m2_t test_vabd_vv_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vabd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+vuint16m4_t test_vabd_vv_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vabd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+vuint16m8_t test_vabd_vv_u16m8_tu(vuint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
return __riscv_vabd_tu(vd, vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
index 0b3415e65e498..a76f623b4b6d2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
@@ -136,4 +136,3 @@ vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t
vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
return __riscv_vabdu_tu(vd, vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
index d992f1a41089e..7b37616213329 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
@@ -7,223 +7,222 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
-vint8mf8_t test_vabs_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
+vuint8mf8_t test_vabs_v_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
-vint8mf4_t test_vabs_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
+vuint8mf4_t test_vabs_v_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
-vint8mf2_t test_vabs_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
+vuint8mf2_t test_vabs_v_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
-vint8m1_t test_vabs_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vl) {
+vuint8m1_t test_vabs_v_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
-vint8m2_t test_vabs_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vl) {
+vuint8m2_t test_vabs_v_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
-vint8m4_t test_vabs_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vl) {
+vuint8m4_t test_vabs_v_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
-vint8m8_t test_vabs_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vl) {
+vuint8m8_t test_vabs_v_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vabs_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+vuint16mf4_t test_vabs_v_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vabs_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+vuint16mf2_t test_vabs_v_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vabs_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vl) {
+vuint16m1_t test_vabs_v_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vabs_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vl) {
+vuint16m2_t test_vabs_v_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vabs_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vl) {
+vuint16m4_t test_vabs_v_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vabs_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vl) {
+vuint16m8_t test_vabs_v_u16m8_tu(vuint16m8_t vd, vint16m8_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vint32mf2_t test_vabs_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+vuint32mf2_t test_vabs_v_u32mf2_tu(vuint32mf2_t vd, vint32mf2_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vint32m1_t test_vabs_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vl) {
+vuint32m1_t test_vabs_v_u32m1_tu(vuint32m1_t vd, vint32m1_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vint32m2_t test_vabs_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vl) {
+vuint32m2_t test_vabs_v_u32m2_tu(vuint32m2_t vd, vint32m2_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vint32m4_t test_vabs_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vl) {
+vuint32m4_t test_vabs_v_u32m4_tu(vuint32m4_t vd, vint32m4_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vint32m8_t test_vabs_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vl) {
+vuint32m8_t test_vabs_v_u32m8_tu(vuint32m8_t vd, vint32m8_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
-vint64m1_t test_vabs_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vl) {
+vuint64m1_t test_vabs_v_u64m1_tu(vuint64m1_t vd, vint64m1_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
-vint64m2_t test_vabs_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vl) {
+vuint64m2_t test_vabs_v_u64m2_tu(vuint64m2_t vd, vint64m2_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
-vint64m4_t test_vabs_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vl) {
+vuint64m4_t test_vabs_v_u64m4_tu(vuint64m4_t vd, vint64m4_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
-vint64m8_t test_vabs_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vl) {
+vuint64m8_t test_vabs_v_u64m8_tu(vuint64m8_t vd, vint64m8_t vs2, size_t vl) {
return __riscv_vabs_tu(vd, vs2, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabda_vv.c
similarity index 58%
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabda_vv.c
index eb403a9d33a6e..420a5ca5dea71 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabda_vv.c
@@ -7,113 +7,112 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabda_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabda.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vint16mf4_t test_vwabdacc_vv_i8mf8_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint16mf4_t test_vwabda_vv_u16mf4_tu(vuint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabda_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabda.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vint16mf2_t test_vwabdacc_vv_i8mf4_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint16mf2_t test_vwabda_vv_u16mf2_tu(vuint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabda_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabda.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vint16m1_t test_vwabdacc_vv_i8mf2_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint16m1_t test_vwabda_vv_u16m1_tu(vuint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabda_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabda.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vint16m2_t test_vwabdacc_vv_i8m1_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint16m2_t test_vwabda_vv_u16m2_tu(vuint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabda_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabda.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vint16m4_t test_vwabdacc_vv_i8m2_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint16m4_t test_vwabda_vv_u16m4_tu(vuint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabda_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabda.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vint16m8_t test_vwabdacc_vv_i8m4_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint16m8_t test_vwabda_vv_u16m8_tu(vuint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabda_vv_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabda.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vint32mf2_t test_vwabdacc_vv_i16mf4_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint32mf2_t test_vwabda_vv_u32mf2_tu(vuint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabda_vv_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabda.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vint32m1_t test_vwabdacc_vv_i16mf2_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint32m1_t test_vwabda_vv_u32m1_tu(vuint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabda_vv_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabda.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vint32m2_t test_vwabdacc_vv_i16m1_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint32m2_t test_vwabda_vv_u32m2_tu(vuint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabda_vv_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabda.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vint32m4_t test_vwabdacc_vv_i16m2_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint32m4_t test_vwabda_vv_u32m4_tu(vuint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4_tu
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabda_vv_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabda.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vint32m8_t test_vwabdacc_vv_i16m4_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
- return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+vuint32m8_t test_vwabda_vv_u32m8_tu(vuint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+ return __riscv_vwabda_tu(vd, vs2, vs1, vl);
}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
deleted file mode 100644
index ea4f25ec0e121..0000000000000
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
-// RUN: -disable-O0-optnone \
-// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
-// RUN: FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8_tu
-// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
-//
-vuint16mf4_t test_vwabdaccu_vv_u8mf8_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4_tu
-// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
-//
-vuint16mf2_t test_vwabdaccu_vv_u8mf4_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2_tu
-// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
-//
-vuint16m1_t test_vwabdaccu_vv_u8mf2_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1_tu
-// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
-//
-vuint16m2_t test_vwabdaccu_vv_u8m1_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2_tu
-// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
-//
-vuint16m4_t test_vwabdaccu_vv_u8m2_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4_tu
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
-//
-vuint16m8_t test_vwabdaccu_vv_u8m4_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4_tu
-// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
-//
-vuint32mf2_t test_vwabdaccu_vv_u16mf4_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2_tu
-// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
-//
-vuint32m1_t test_vwabdaccu_vv_u16mf2_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1_tu
-// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
-//
-vuint32m2_t test_vwabdaccu_vv_u16m1_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2_tu
-// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
-//
-vuint32m4_t test_vwabdaccu_vv_u16m2_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4_tu
-// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
-// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
-//
-vuint32m8_t test_vwabdaccu_vv_u16m4_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
- return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
-}
-
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdau_vv.c
similarity index 56%
rename from clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
rename to clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdau_vv.c
index 6726fafde1451..2efcce52fe57d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdau_vv.c
@@ -7,113 +7,112 @@
#include <riscv_vector.h>
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdau_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdau.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
-vuint16mf4_t test_vwabdaccu_vv_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint16mf4_t test_vwabdau_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdau_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdau.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
-vuint16mf2_t test_vwabdaccu_vv_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint16mf2_t test_vwabdau_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdau_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdau.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
-vuint16m1_t test_vwabdaccu_vv_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint16m1_t test_vwabdau_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdau_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdau.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
-vuint16m2_t test_vwabdaccu_vv_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint16m2_t test_vwabdau_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdau_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdau.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
-vuint16m4_t test_vwabdaccu_vv_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint16m4_t test_vwabdau_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdau_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdau.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
-vuint16m8_t test_vwabdaccu_vv_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint16m8_t test_vwabdau_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdau_vv_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdau.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
-vuint32mf2_t test_vwabdaccu_vv_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint32mf2_t test_vwabdau_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdau_vv_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdau.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
-vuint32m1_t test_vwabdaccu_vv_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint32m1_t test_vwabdau_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdau_vv_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdau.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
-vuint32m2_t test_vwabdaccu_vv_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint32m2_t test_vwabdau_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdau_vv_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdau.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
-vuint32m4_t test_vwabdaccu_vv_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint32m4_t test_vwabdau_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdau_vv_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdau.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
-vuint32m8_t test_vwabdaccu_vv_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
- return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+vuint32m8_t test_vwabdau_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+ return __riscv_vwabdau_tu(vd, vs2, vs1, vl);
}
-
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index d8f1d0a88c897..92ca3a827d412 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -356,15 +356,34 @@ let TargetPrefix = "riscv" in {
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
- // For destination vector type is the same as first and second source vector.
+ // For mask logical operations without passthru operand.
// Input: (vector_in, vector_in, vl)
- class RISCVBinaryAAAUnMasked
+ class RISCVMaskLogical
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 2;
}
// For destination vector type is the same as first and second source vector.
+ // Input: (passthru, vector_in, vector_in, vl)
+ class RISCVBinaryAAAUnMasked
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 3;
+ }
+ // For destination vector type is the same as first and second source vector (with mask).
+ // Input: (passthru, vector_in, vector_in, mask, vl, policy)
+ class RISCVBinaryAAAMasked
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty,
+ LLVMMatchType<1>],
+ [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 4;
+ }
+ // For destination vector type is the same as first and second source vector.
// Input: (passthru, vector_in, int_vector_in, vl)
class RISCVRGatherVVUnMasked
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
@@ -1203,6 +1222,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVBinaryAAXUnMaskedRoundingMode;
def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMaskedRoundingMode;
}
+ multiclass RISCVBinaryAAA {
+ def "int_riscv_" # NAME : RISCVBinaryAAAUnMasked;
+ def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAAMasked;
+ }
// Like RISCVBinaryAAX, but the second operand is used a shift amount so it
// must be a vector or an XLen scalar.
multiclass RISCVBinaryAAShift {
@@ -1610,14 +1633,14 @@ let TargetPrefix = "riscv" in {
defm vfwredosum : RISCVReductionRoundingMode;
}
- def int_riscv_vmand: RISCVBinaryAAAUnMasked;
- def int_riscv_vmnand: RISCVBinaryAAAUnMasked;
- def int_riscv_vmandn: RISCVBinaryAAAUnMasked;
- def int_riscv_vmxor: RISCVBinaryAAAUnMasked;
- def int_riscv_vmor: RISCVBinaryAAAUnMasked;
- def int_riscv_vmnor: RISCVBinaryAAAUnMasked;
- def int_riscv_vmorn: RISCVBinaryAAAUnMasked;
- def int_riscv_vmxnor: RISCVBinaryAAAUnMasked;
+ def int_riscv_vmand: RISCVMaskLogical;
+ def int_riscv_vmnand: RISCVMaskLogical;
+ def int_riscv_vmandn: RISCVMaskLogical;
+ def int_riscv_vmxor: RISCVMaskLogical;
+ def int_riscv_vmor: RISCVMaskLogical;
+ def int_riscv_vmnor: RISCVMaskLogical;
+ def int_riscv_vmorn: RISCVMaskLogical;
+ def int_riscv_vmxnor: RISCVMaskLogical;
def int_riscv_vmclr : RISCVNullaryIntrinsic;
def int_riscv_vmset : RISCVNullaryIntrinsic;
@@ -1911,12 +1934,11 @@ let TargetPrefix = "riscv" in {
// Zvabd - Vector Absolute Difference
//===----------------------------------------------------------------------===//
let TargetPrefix = "riscv" in {
-
- defm vabs : RISCVUnaryAA;
- defm vabd : RISCVBinaryAAX;
- defm vabdu : RISCVBinaryAAX;
- defm vwabdacc : RISCVTernaryWide;
- defm vwabdaccu : RISCVTernaryWide;
+ defm vabs : RISCVUnaryAA;
+ defm vabd : RISCVBinaryAAA;
+ defm vabdu : RISCVBinaryAAA;
+ defm vwabda : RISCVTernaryWide;
+ defm vwabdau : RISCVTernaryWide;
} // TargetPrefix = "riscv"
//===----------------------------------------------------------------------===//
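[Illustrative sketch, not part of the patch: the call shapes implied by the new RISCVBinaryAAAUnMasked/RISCVBinaryAAAMasked classes and the RISCVBinaryAAA multiclass above. The unmasked intrinsic takes (passthru, vs2, vs1, vl); the masked variant defined as int_riscv_<name>_mask additionally takes a mask operand and a trailing policy immediate. Overload suffixes are elided here as in the .ll tests below; the policy value 3 (tail/mask agnostic) is an assumption.]

; Illustrative only -- call shapes for the new binary Zvabd intrinsics.
define <vscale x 1 x i8> @vabd_call_shapes(<vscale x 1 x i8> %vd,
                                           <vscale x 1 x i8> %a,
                                           <vscale x 1 x i8> %b,
                                           <vscale x 1 x i1> %m, i64 %vl) {
  ; Unmasked form: (passthru, vs2, vs1, vl); passthru is poison when no
  ; tail policy is requested.
  %u = call <vscale x 1 x i8> @llvm.riscv.vabd(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i64 %vl)
  ; Masked form: (passthru, vs2, vs1, mask, vl, policy).
  %r = call <vscale x 1 x i8> @llvm.riscv.vabd.mask(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i64 %vl, i64 3)
  ret <vscale x 1 x i8> %r
}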
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index ecd270c00f3f4..aa25a7af443e4 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -89,8 +89,8 @@ defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU", ABDIntVectors>;
defm : VPatUnaryV_V<"int_riscv_vabs", "PseudoVABS", AllIntegerVectors>;
defm : VPatBinaryV_VV<"int_riscv_vabd", "PseudoVABD", ABDIntVectors>;
defm : VPatBinaryV_VV<"int_riscv_vabdu", "PseudoVABDU", ABDIntVectors>;
-defm : VPatTernaryW_VV<"int_riscv_vwabdacc", "PseudoVWABDA", ABDAIntVectors>;
-defm : VPatTernaryW_VV<"int_riscv_vwabdaccu", "PseudoVWABDAU", ABDAIntVectors>;
+defm : VPatTernaryW_VV<"int_riscv_vwabda", "PseudoVWABDA", ABDAIntVectors>;
+defm : VPatTernaryW_VV<"int_riscv_vwabdau", "PseudoVWABDAU", ABDAIntVectors>;
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (abs (vti.Vector vti.RegClass:$rs2))),
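[Illustrative sketch, not part of the patch: an IR-level call matched by the renamed PseudoVWABDA/PseudoVWABDAU ternary-wide patterns. The shape mirrors the autogenerated clang checks above; the trailing immediate 2 is the policy operand used by the _tu tests.]

; Illustrative only -- widening accumulate call shape, (vd, vs2, vs1, vl, policy).
define <vscale x 2 x i32> @vwabda_call_shape(<vscale x 2 x i32> %vd,
                                             <vscale x 2 x i16> %a,
                                             <vscale x 2 x i16> %b, i64 %vl) {
  %r = call <vscale x 2 x i32> @llvm.riscv.vwabda.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i64 %vl, i64 2)
  ret <vscale x 2 x i32> %r
}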
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabd.ll b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
index 24f242c64aab6..9ff66ec463938 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vabd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
@@ -14,12 +14,10 @@ define <vscale x 1 x i8> @vabd_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8>
; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 1 x i8> @llvm.riscv.vabd.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabd(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
ret <vscale x 1 x i8> %res
}
-declare <vscale x 1 x i8> @llvm.riscv.vabd.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
-
define <vscale x 2 x i8> @vabd_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
; RV32-LABEL: vabd_vv_i8mf4:
; RV32: # %bb.0:
@@ -32,12 +30,10 @@ define <vscale x 2 x i8> @vabd_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8>
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 2 x i8> @llvm.riscv.vabd.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabd(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
ret <vscale x 2 x i8> %res
}
-declare <vscale x 2 x i8> @llvm.riscv.vabd.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
-
define <vscale x 4 x i8> @vabd_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
; RV32-LABEL: vabd_vv_i8mf2:
; RV32: # %bb.0:
@@ -50,12 +46,10 @@ define <vscale x 4 x i8> @vabd_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8>
; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 4 x i8> @llvm.riscv.vabd.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabd(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
ret <vscale x 4 x i8> %res
}
-declare <vscale x 4 x i8> @llvm.riscv.vabd.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
-
define <vscale x 8 x i8> @vabd_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
; RV32-LABEL: vabd_vv_i8m1:
; RV32: # %bb.0:
@@ -68,12 +62,10 @@ define <vscale x 8 x i8> @vabd_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %
; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 8 x i8> @llvm.riscv.vabd.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabd(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
ret <vscale x 8 x i8> %res
}
-declare <vscale x 8 x i8> @llvm.riscv.vabd.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
-
define <vscale x 16 x i8> @vabd_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; RV32-LABEL: vabd_vv_i8m2:
; RV32: # %bb.0:
@@ -86,12 +78,10 @@ define <vscale x 16 x i8> @vabd_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8
; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v10
; RV64-NEXT: ret
- %res = call <vscale x 16 x i8> @llvm.riscv.vabd.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabd(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
ret <vscale x 16 x i8> %res
}
-declare <vscale x 16 x i8> @llvm.riscv.vabd.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
-
define <vscale x 32 x i8> @vabd_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
; RV32-LABEL: vabd_vv_i8m4:
; RV32: # %bb.0:
@@ -104,12 +94,10 @@ define <vscale x 32 x i8> @vabd_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8
; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v12
; RV64-NEXT: ret
- %res = call <vscale x 32 x i8> @llvm.riscv.vabd.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabd(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
ret <vscale x 32 x i8> %res
}
-declare <vscale x 32 x i8> @llvm.riscv.vabd.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
-
define <vscale x 64 x i8> @vabd_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; RV32-LABEL: vabd_vv_i8m8:
; RV32: # %bb.0:
@@ -122,12 +110,10 @@ define <vscale x 64 x i8> @vabd_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8
; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v16
; RV64-NEXT: ret
- %res = call <vscale x 64 x i8> @llvm.riscv.vabd.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabd(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
ret <vscale x 64 x i8> %res
}
-declare <vscale x 64 x i8> @llvm.riscv.vabd.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
-
define <vscale x 1 x i16> @vabd_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
; RV32-LABEL: vabd_vv_i16mf4:
; RV32: # %bb.0:
@@ -140,12 +126,10 @@ define <vscale x 1 x i16> @vabd_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i
; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 1 x i16> @llvm.riscv.vabd.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabd(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
ret <vscale x 1 x i16> %res
}
-declare <vscale x 1 x i16> @llvm.riscv.vabd.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
-
define <vscale x 2 x i16> @vabd_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
; RV32-LABEL: vabd_vv_i16mf2:
; RV32: # %bb.0:
@@ -158,12 +142,10 @@ define <vscale x 2 x i16> @vabd_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 2 x i16> @llvm.riscv.vabd.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabd(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
ret <vscale x 2 x i16> %res
}
-declare <vscale x 2 x i16> @llvm.riscv.vabd.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
-
define <vscale x 4 x i16> @vabd_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
; RV32-LABEL: vabd_vv_i16m1:
; RV32: # %bb.0:
@@ -176,12 +158,10 @@ define <vscale x 4 x i16> @vabd_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i1
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 4 x i16> @llvm.riscv.vabd.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabd(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
ret <vscale x 4 x i16> %res
}
-declare <vscale x 4 x i16> @llvm.riscv.vabd.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
-
define <vscale x 8 x i16> @vabd_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; RV32-LABEL: vabd_vv_i16m2:
; RV32: # %bb.0:
@@ -194,12 +174,10 @@ define <vscale x 8 x i16> @vabd_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i1
; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v10
; RV64-NEXT: ret
- %res = call <vscale x 8 x i16> @llvm.riscv.vabd.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabd(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
ret <vscale x 8 x i16> %res
}
-declare <vscale x 8 x i16> @llvm.riscv.vabd.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
-
define <vscale x 16 x i16> @vabd_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
; RV32-LABEL: vabd_vv_i16m4:
; RV32: # %bb.0:
@@ -212,12 +190,10 @@ define <vscale x 16 x i16> @vabd_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x
; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v12
; RV64-NEXT: ret
- %res = call <vscale x 16 x i16> @llvm.riscv.vabd.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabd(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
ret <vscale x 16 x i16> %res
}
-declare <vscale x 16 x i16> @llvm.riscv.vabd.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
-
define <vscale x 32 x i16> @vabd_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; RV32-LABEL: vabd_vv_i16m8:
; RV32: # %bb.0:
@@ -230,9 +206,6 @@ define <vscale x 32 x i16> @vabd_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x
; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; RV64-NEXT: vabd.vv v8, v8, v16
; RV64-NEXT: ret
- %res = call <vscale x 32 x i16> @llvm.riscv.vabd.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabd(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
ret <vscale x 32 x i16> %res
}
-
-declare <vscale x 32 x i16> @llvm.riscv.vabd.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
-
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabdu.ll b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
index da961efefe7f9..acd7502dbc345 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
@@ -14,12 +14,10 @@ define <vscale x 1 x i8> @vabdu_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8>
; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 1 x i8> @llvm.riscv.vabdu.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabdu(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
ret <vscale x 1 x i8> %res
}
-declare <vscale x 1 x i8> @llvm.riscv.vabdu.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
-
define <vscale x 2 x i8> @vabdu_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
; RV32-LABEL: vabdu_vv_i8mf4:
; RV32: # %bb.0:
@@ -32,12 +30,10 @@ define <vscale x 2 x i8> @vabdu_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8>
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 2 x i8> @llvm.riscv.vabdu.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabdu(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
ret <vscale x 2 x i8> %res
}
-declare <vscale x 2 x i8> @llvm.riscv.vabdu.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
-
define <vscale x 4 x i8> @vabdu_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
; RV32-LABEL: vabdu_vv_i8mf2:
; RV32: # %bb.0:
@@ -50,12 +46,10 @@ define <vscale x 4 x i8> @vabdu_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8>
; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 4 x i8> @llvm.riscv.vabdu.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabdu(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
ret <vscale x 4 x i8> %res
}
-declare <vscale x 4 x i8> @llvm.riscv.vabdu.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
-
define <vscale x 8 x i8> @vabdu_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
; RV32-LABEL: vabdu_vv_i8m1:
; RV32: # %bb.0:
@@ -68,12 +62,10 @@ define <vscale x 8 x i8> @vabdu_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8>
; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 8 x i8> @llvm.riscv.vabdu.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabdu(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
ret <vscale x 8 x i8> %res
}
-declare <vscale x 8 x i8> @llvm.riscv.vabdu.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
-
define <vscale x 16 x i8> @vabdu_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; RV32-LABEL: vabdu_vv_i8m2:
; RV32: # %bb.0:
@@ -86,12 +78,10 @@ define <vscale x 16 x i8> @vabdu_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i
; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v10
; RV64-NEXT: ret
- %res = call <vscale x 16 x i8> @llvm.riscv.vabdu.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabdu(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
ret <vscale x 16 x i8> %res
}
-declare <vscale x 16 x i8> @llvm.riscv.vabdu.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
-
define <vscale x 32 x i8> @vabdu_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
; RV32-LABEL: vabdu_vv_i8m4:
; RV32: # %bb.0:
@@ -104,12 +94,10 @@ define <vscale x 32 x i8> @vabdu_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i
; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v12
; RV64-NEXT: ret
- %res = call <vscale x 32 x i8> @llvm.riscv.vabdu.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabdu(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
ret <vscale x 32 x i8> %res
}
-declare <vscale x 32 x i8> @llvm.riscv.vabdu.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
-
define <vscale x 64 x i8> @vabdu_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; RV32-LABEL: vabdu_vv_i8m8:
; RV32: # %bb.0:
@@ -122,12 +110,10 @@ define <vscale x 64 x i8> @vabdu_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i
; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v16
; RV64-NEXT: ret
- %res = call <vscale x 64 x i8> @llvm.riscv.vabdu.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabdu(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
ret <vscale x 64 x i8> %res
}
-declare <vscale x 64 x i8> @llvm.riscv.vabdu.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
-
define <vscale x 1 x i16> @vabdu_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
; RV32-LABEL: vabdu_vv_i16mf4:
; RV32: # %bb.0:
@@ -140,12 +126,10 @@ define <vscale x 1 x i16> @vabdu_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x
; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 1 x i16> @llvm.riscv.vabdu.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabdu(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
ret <vscale x 1 x i16> %res
}
-declare <vscale x 1 x i16> @llvm.riscv.vabdu.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
-
define <vscale x 2 x i16> @vabdu_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
; RV32-LABEL: vabdu_vv_i16mf2:
; RV32: # %bb.0:
@@ -158,12 +142,10 @@ define <vscale x 2 x i16> @vabdu_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 2 x i16> @llvm.riscv.vabdu.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabdu(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
ret <vscale x 2 x i16> %res
}
-declare <vscale x 2 x i16> @llvm.riscv.vabdu.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
-
define <vscale x 4 x i16> @vabdu_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
; RV32-LABEL: vabdu_vv_i16m1:
; RV32: # %bb.0:
@@ -176,12 +158,10 @@ define <vscale x 4 x i16> @vabdu_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v9
; RV64-NEXT: ret
- %res = call <vscale x 4 x i16> @llvm.riscv.vabdu.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabdu(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
ret <vscale x 4 x i16> %res
}
-declare <vscale x 4 x i16> @llvm.riscv.vabdu.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
-
define <vscale x 8 x i16> @vabdu_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; RV32-LABEL: vabdu_vv_i16m2:
; RV32: # %bb.0:
@@ -194,12 +174,10 @@ define <vscale x 8 x i16> @vabdu_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i
; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v10
; RV64-NEXT: ret
- %res = call <vscale x 8 x i16> @llvm.riscv.vabdu.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabdu(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
ret <vscale x 8 x i16> %res
}
-declare <vscale x 8 x i16> @llvm.riscv.vabdu.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
-
define <vscale x 16 x i16> @vabdu_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
; RV32-LABEL: vabdu_vv_i16m4:
; RV32: # %bb.0:
@@ -212,12 +190,10 @@ define <vscale x 16 x i16> @vabdu_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16
; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v12
; RV64-NEXT: ret
- %res = call <vscale x 16 x i16> @llvm.riscv.vabdu.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabdu(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
ret <vscale x 16 x i16> %res
}
-declare <vscale x 16 x i16> @llvm.riscv.vabdu.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
-
define <vscale x 32 x i16> @vabdu_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; RV32-LABEL: vabdu_vv_i16m8:
; RV32: # %bb.0:
@@ -230,9 +206,6 @@ define <vscale x 32 x i16> @vabdu_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32
; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; RV64-NEXT: vabdu.vv v8, v8, v16
; RV64-NEXT: ret
- %res = call <vscale x 32 x i16> @llvm.riscv.vabdu.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabdu(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
ret <vscale x 32 x i16> %res
}
-
-declare <vscale x 32 x i16> @llvm.riscv.vabdu.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
-
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabs.ll b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
index 7be228baa37ee..0bb68bdcb2f5c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vabs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
@@ -14,12 +14,10 @@ define <vscale x 1 x i8> @vabs_v_i8mf8(<vscale x 1 x i8> %a) {
; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 1 x i8> @llvm.riscv.vabs.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, iXLen -1)
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabs(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, iXLen -1)
ret <vscale x 1 x i8> %res
}
-declare <vscale x 1 x i8> @llvm.riscv.vabs.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
-
define <vscale x 2 x i8> @vabs_v_i8mf4(<vscale x 2 x i8> %a) {
; RV32-LABEL: vabs_v_i8mf4:
; RV32: # %bb.0:
@@ -32,12 +30,10 @@ define <vscale x 2 x i8> @vabs_v_i8mf4(<vscale x 2 x i8> %a) {
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 2 x i8> @llvm.riscv.vabs.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, iXLen -1)
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabs(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, iXLen -1)
ret <vscale x 2 x i8> %res
}
-declare <vscale x 2 x i8> @llvm.riscv.vabs.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
-
define <vscale x 4 x i8> @vabs_v_i8mf2(<vscale x 4 x i8> %a) {
; RV32-LABEL: vabs_v_i8mf2:
; RV32: # %bb.0:
@@ -50,12 +46,10 @@ define <vscale x 4 x i8> @vabs_v_i8mf2(<vscale x 4 x i8> %a) {
; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 4 x i8> @llvm.riscv.vabs.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, iXLen -1)
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabs(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, iXLen -1)
ret <vscale x 4 x i8> %res
}
-declare <vscale x 4 x i8> @llvm.riscv.vabs.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
-
define <vscale x 8 x i8> @vabs_v_i8m1(<vscale x 8 x i8> %a) {
; RV32-LABEL: vabs_v_i8m1:
; RV32: # %bb.0:
@@ -68,12 +62,10 @@ define <vscale x 8 x i8> @vabs_v_i8m1(<vscale x 8 x i8> %a) {
; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 8 x i8> @llvm.riscv.vabs.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, iXLen -1)
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabs(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, iXLen -1)
ret <vscale x 8 x i8> %res
}
-declare <vscale x 8 x i8> @llvm.riscv.vabs.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
-
define <vscale x 16 x i8> @vabs_v_i8m2(<vscale x 16 x i8> %a) {
; RV32-LABEL: vabs_v_i8m2:
; RV32: # %bb.0:
@@ -86,12 +78,10 @@ define <vscale x 16 x i8> @vabs_v_i8m2(<vscale x 16 x i8> %a) {
; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 16 x i8> @llvm.riscv.vabs.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, iXLen -1)
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabs(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, iXLen -1)
ret <vscale x 16 x i8> %res
}
-declare <vscale x 16 x i8> @llvm.riscv.vabs.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
-
define <vscale x 32 x i8> @vabs_v_i8m4(<vscale x 32 x i8> %a) {
; RV32-LABEL: vabs_v_i8m4:
; RV32: # %bb.0:
@@ -104,12 +94,10 @@ define <vscale x 32 x i8> @vabs_v_i8m4(<vscale x 32 x i8> %a) {
; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 32 x i8> @llvm.riscv.vabs.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, iXLen -1)
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabs(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, iXLen -1)
ret <vscale x 32 x i8> %res
}
-declare <vscale x 32 x i8> @llvm.riscv.vabs.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
-
define <vscale x 64 x i8> @vabs_v_i8m8(<vscale x 64 x i8> %a) {
; RV32-LABEL: vabs_v_i8m8:
; RV32: # %bb.0:
@@ -122,12 +110,10 @@ define <vscale x 64 x i8> @vabs_v_i8m8(<vscale x 64 x i8> %a) {
; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 64 x i8> @llvm.riscv.vabs.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, iXLen -1)
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabs(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, iXLen -1)
ret <vscale x 64 x i8> %res
}
-declare <vscale x 64 x i8> @llvm.riscv.vabs.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
-
define <vscale x 1 x i16> @vabs_v_i16mf4(<vscale x 1 x i16> %a) {
; RV32-LABEL: vabs_v_i16mf4:
; RV32: # %bb.0:
@@ -140,12 +126,10 @@ define <vscale x 1 x i16> @vabs_v_i16mf4(<vscale x 1 x i16> %a) {
; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 1 x i16> @llvm.riscv.vabs.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, iXLen -1)
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabs(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, iXLen -1)
ret <vscale x 1 x i16> %res
}
-declare <vscale x 1 x i16> @llvm.riscv.vabs.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
-
define <vscale x 2 x i16> @vabs_v_i16mf2(<vscale x 2 x i16> %a) {
; RV32-LABEL: vabs_v_i16mf2:
; RV32: # %bb.0:
@@ -158,12 +142,10 @@ define <vscale x 2 x i16> @vabs_v_i16mf2(<vscale x 2 x i16> %a) {
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 2 x i16> @llvm.riscv.vabs.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, iXLen -1)
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabs(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, iXLen -1)
ret <vscale x 2 x i16> %res
}
-declare <vscale x 2 x i16> @llvm.riscv.vabs.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
-
define <vscale x 4 x i16> @vabs_v_i16m1(<vscale x 4 x i16> %a) {
; RV32-LABEL: vabs_v_i16m1:
; RV32: # %bb.0:
@@ -176,12 +158,10 @@ define <vscale x 4 x i16> @vabs_v_i16m1(<vscale x 4 x i16> %a) {
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 4 x i16> @llvm.riscv.vabs.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, iXLen -1)
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabs(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, iXLen -1)
ret <vscale x 4 x i16> %res
}
-declare <vscale x 4 x i16> @llvm.riscv.vabs.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
-
define <vscale x 8 x i16> @vabs_v_i16m2(<vscale x 8 x i16> %a) {
; RV32-LABEL: vabs_v_i16m2:
; RV32: # %bb.0:
@@ -194,12 +174,10 @@ define <vscale x 8 x i16> @vabs_v_i16m2(<vscale x 8 x i16> %a) {
; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 8 x i16> @llvm.riscv.vabs.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, iXLen -1)
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabs(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, iXLen -1)
ret <vscale x 8 x i16> %res
}
-declare <vscale x 8 x i16> @llvm.riscv.vabs.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
-
define <vscale x 16 x i16> @vabs_v_i16m4(<vscale x 16 x i16> %a) {
; RV32-LABEL: vabs_v_i16m4:
; RV32: # %bb.0:
@@ -212,12 +190,10 @@ define <vscale x 16 x i16> @vabs_v_i16m4(<vscale x 16 x i16> %a) {
; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 16 x i16> @llvm.riscv.vabs.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, iXLen -1)
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabs(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, iXLen -1)
ret <vscale x 16 x i16> %res
}
-declare <vscale x 16 x i16> @llvm.riscv.vabs.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
-
define <vscale x 32 x i16> @vabs_v_i16m8(<vscale x 32 x i16> %a) {
; RV32-LABEL: vabs_v_i16m8:
; RV32: # %bb.0:
@@ -230,12 +206,10 @@ define <vscale x 32 x i16> @vabs_v_i16m8(<vscale x 32 x i16> %a) {
; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 32 x i16> @llvm.riscv.vabs.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, iXLen -1)
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabs(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, iXLen -1)
ret <vscale x 32 x i16> %res
}
-declare <vscale x 32 x i16> @llvm.riscv.vabs.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
-
define <vscale x 1 x i32> @vabs_v_i32mf2(<vscale x 1 x i32> %a) {
; RV32-LABEL: vabs_v_i32mf2:
; RV32: # %bb.0:
@@ -248,12 +222,10 @@ define <vscale x 1 x i32> @vabs_v_i32mf2(<vscale x 1 x i32> %a) {
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 1 x i32> @llvm.riscv.vabs.vscalex1xi32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %a, iXLen -1)
+ %res = call <vscale x 1 x i32> @llvm.riscv.vabs(<vscale x 1 x i32> poison, <vscale x 1 x i32> %a, iXLen -1)
ret <vscale x 1 x i32> %res
}
-declare <vscale x 1 x i32> @llvm.riscv.vabs.vscalex1xi32(<vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
-
define <vscale x 2 x i32> @vabs_v_i32m1(<vscale x 2 x i32> %a) {
; RV32-LABEL: vabs_v_i32m1:
; RV32: # %bb.0:
@@ -266,12 +238,10 @@ define <vscale x 2 x i32> @vabs_v_i32m1(<vscale x 2 x i32> %a) {
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 2 x i32> @llvm.riscv.vabs.vscalex2xi32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %a, iXLen -1)
+ %res = call <vscale x 2 x i32> @llvm.riscv.vabs(<vscale x 2 x i32> poison, <vscale x 2 x i32> %a, iXLen -1)
ret <vscale x 2 x i32> %res
}
-declare <vscale x 2 x i32> @llvm.riscv.vabs.vscalex2xi32(<vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
-
define <vscale x 4 x i32> @vabs_v_i32m2(<vscale x 4 x i32> %a) {
; RV32-LABEL: vabs_v_i32m2:
; RV32: # %bb.0:
@@ -284,12 +254,10 @@ define <vscale x 4 x i32> @vabs_v_i32m2(<vscale x 4 x i32> %a) {
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 4 x i32> @llvm.riscv.vabs.vscalex4xi32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1)
+ %res = call <vscale x 4 x i32> @llvm.riscv.vabs(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1)
ret <vscale x 4 x i32> %res
}
-declare <vscale x 4 x i32> @llvm.riscv.vabs.vscalex4xi32(<vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
-
define <vscale x 8 x i32> @vabs_v_i32m4(<vscale x 8 x i32> %a) {
; RV32-LABEL: vabs_v_i32m4:
; RV32: # %bb.0:
@@ -302,12 +270,10 @@ define <vscale x 8 x i32> @vabs_v_i32m4(<vscale x 8 x i32> %a) {
; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 8 x i32> @llvm.riscv.vabs.vscalex8xi32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %a, iXLen -1)
+ %res = call <vscale x 8 x i32> @llvm.riscv.vabs(<vscale x 8 x i32> poison, <vscale x 8 x i32> %a, iXLen -1)
ret <vscale x 8 x i32> %res
}
-declare <vscale x 8 x i32> @llvm.riscv.vabs.vscalex8xi32(<vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
-
define <vscale x 16 x i32> @vabs_v_i32m8(<vscale x 16 x i32> %a) {
; RV32-LABEL: vabs_v_i32m8:
; RV32: # %bb.0:
@@ -320,12 +286,10 @@ define <vscale x 16 x i32> @vabs_v_i32m8(<vscale x 16 x i32> %a) {
; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 16 x i32> @llvm.riscv.vabs.vscalex16xi32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %a, iXLen -1)
+ %res = call <vscale x 16 x i32> @llvm.riscv.vabs(<vscale x 16 x i32> poison, <vscale x 16 x i32> %a, iXLen -1)
ret <vscale x 16 x i32> %res
}
-declare <vscale x 16 x i32> @llvm.riscv.vabs.vscalex16xi32(<vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
-
define <vscale x 1 x i64> @vabs_v_i64m1(<vscale x 1 x i64> %a) {
; RV32-LABEL: vabs_v_i64m1:
; RV32: # %bb.0:
@@ -338,12 +302,10 @@ define <vscale x 1 x i64> @vabs_v_i64m1(<vscale x 1 x i64> %a) {
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 1 x i64> @llvm.riscv.vabs.vscalex1xi64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %a, iXLen -1)
+ %res = call <vscale x 1 x i64> @llvm.riscv.vabs(<vscale x 1 x i64> poison, <vscale x 1 x i64> %a, iXLen -1)
ret <vscale x 1 x i64> %res
}
-declare <vscale x 1 x i64> @llvm.riscv.vabs.vscalex1xi64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
-
define <vscale x 2 x i64> @vabs_v_i64m2(<vscale x 2 x i64> %a) {
; RV32-LABEL: vabs_v_i64m2:
; RV32: # %bb.0:
@@ -356,12 +318,10 @@ define <vscale x 2 x i64> @vabs_v_i64m2(<vscale x 2 x i64> %a) {
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 2 x i64> @llvm.riscv.vabs.vscalex2xi64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %a, iXLen -1)
+ %res = call <vscale x 2 x i64> @llvm.riscv.vabs(<vscale x 2 x i64> poison, <vscale x 2 x i64> %a, iXLen -1)
ret <vscale x 2 x i64> %res
}
-declare <vscale x 2 x i64> @llvm.riscv.vabs.vscalex2xi64(<vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
-
define <vscale x 4 x i64> @vabs_v_i64m4(<vscale x 4 x i64> %a) {
; RV32-LABEL: vabs_v_i64m4:
; RV32: # %bb.0:
@@ -374,12 +334,10 @@ define <vscale x 4 x i64> @vabs_v_i64m4(<vscale x 4 x i64> %a) {
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 4 x i64> @llvm.riscv.vabs.vscalex4xi64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, iXLen -1)
+ %res = call <vscale x 4 x i64> @llvm.riscv.vabs(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, iXLen -1)
ret <vscale x 4 x i64> %res
}
-declare <vscale x 4 x i64> @llvm.riscv.vabs.vscalex4xi64(<vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
-
define <vscale x 8 x i64> @vabs_v_i64m8(<vscale x 8 x i64> %a) {
; RV32-LABEL: vabs_v_i64m8:
; RV32: # %bb.0:
@@ -392,9 +350,6 @@ define <vscale x 8 x i64> @vabs_v_i64m8(<vscale x 8 x i64> %a) {
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV64-NEXT: vabs.v v8, v8
; RV64-NEXT: ret
- %res = call <vscale x 8 x i64> @llvm.riscv.vabs.vscalex8xi64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %a, iXLen -1)
+ %res = call <vscale x 8 x i64> @llvm.riscv.vabs(<vscale x 8 x i64> poison, <vscale x 8 x i64> %a, iXLen -1)
ret <vscale x 8 x i64> %res
}
-
-declare <vscale x 8 x i64> @llvm.riscv.vabs.vscalex8xi64(<vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
-
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabda.ll b/llvm/test/CodeGen/RISCV/rvv/vwabda.ll
new file mode 100644
index 0000000000000..5e8349620a429
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabda.ll
@@ -0,0 +1,179 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i16> @vwabda_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vwabda_vv_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vwabda(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 2 x i16> @vwabda_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vwabda_vv_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vwabda(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vwabda_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vwabda_vv_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vwabda(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 8 x i16> @vwabda_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vwabda_vv_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV32-NEXT: vwabda.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV64-NEXT: vwabda.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vwabda(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i16> @vwabda_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vwabda_vv_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vwabda(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 32 x i16> @vwabda_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vwabda_vv_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vwabda(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 1 x i32> @vwabda_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vwabda_vv_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vwabda(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 2 x i32> @vwabda_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vwabda_vv_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vwabda(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 4 x i32> @vwabda_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vwabda_vv_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV32-NEXT: vwabda.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV64-NEXT: vwabda.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vwabda(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 8 x i32> @vwabda_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vwabda_vv_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV32-NEXT: vwabda.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV64-NEXT: vwabda.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vwabda(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 16 x i32> @vwabda_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vwabda_vv_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV32-NEXT: vwabda.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabda_vv_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV64-NEXT: vwabda.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vwabda(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i32> %res
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
deleted file mode 100644
index f09babbddae0e..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
+++ /dev/null
@@ -1,202 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
-
-define <vscale x 1 x i16> @vwabdacc_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
-; RV32-LABEL: vwabdacc_vv_i8mf8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
-; RV32-NEXT: vwabda.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i8mf8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
-; RV64-NEXT: vwabda.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 1 x i16> %res
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwabdacc.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
-
-define <vscale x 2 x i16> @vwabdacc_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
-; RV32-LABEL: vwabdacc_vv_i8mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
-; RV32-NEXT: vwabda.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i8mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
-; RV64-NEXT: vwabda.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 2 x i16> %res
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwabdacc.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
-
-define <vscale x 4 x i16> @vwabdacc_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
-; RV32-LABEL: vwabdacc_vv_i8mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
-; RV32-NEXT: vwabda.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i8mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
-; RV64-NEXT: vwabda.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 4 x i16> %res
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwabdacc.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
-
-define <vscale x 8 x i16> @vwabdacc_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
-; RV32-LABEL: vwabdacc_vv_i8m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, ma
-; RV32-NEXT: vwabda.vv v8, v10, v11
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i8m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, ma
-; RV64-NEXT: vwabda.vv v8, v10, v11
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 8 x i16> %res
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwabdacc.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
-
-define <vscale x 16 x i16> @vwabdacc_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; RV32-LABEL: vwabdacc_vv_i8m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m2, tu, ma
-; RV32-NEXT: vwabda.vv v8, v12, v14
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i8m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m2, tu, ma
-; RV64-NEXT: vwabda.vv v8, v12, v14
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 16 x i16> %res
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwabdacc.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
-
-define <vscale x 32 x i16> @vwabdacc_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
-; RV32-LABEL: vwabdacc_vv_i8m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m4, tu, ma
-; RV32-NEXT: vwabda.vv v8, v16, v20
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i8m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m4, tu, ma
-; RV64-NEXT: vwabda.vv v8, v16, v20
-; RV64-NEXT: ret
- %res = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 32 x i16> %res
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwabdacc.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
-
-define <vscale x 1 x i32> @vwabdacc_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
-; RV32-LABEL: vwabdacc_vv_i16mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
-; RV32-NEXT: vwabda.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i16mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
-; RV64-NEXT: vwabda.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
- ret <vscale x 1 x i32> %res
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwabdacc.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
-
-define <vscale x 2 x i32> @vwabdacc_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
-; RV32-LABEL: vwabdacc_vv_i16mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
-; RV32-NEXT: vwabda.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i16mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
-; RV64-NEXT: vwabda.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
- ret <vscale x 2 x i32> %res
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwabdacc.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
-
-define <vscale x 4 x i32> @vwabdacc_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
-; RV32-LABEL: vwabdacc_vv_i16m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, ma
-; RV32-NEXT: vwabda.vv v8, v10, v11
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i16m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, ma
-; RV64-NEXT: vwabda.vv v8, v10, v11
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
- ret <vscale x 4 x i32> %res
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwabdacc.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
-
-define <vscale x 8 x i32> @vwabdacc_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; RV32-LABEL: vwabdacc_vv_i16m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, ma
-; RV32-NEXT: vwabda.vv v8, v12, v14
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i16m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, ma
-; RV64-NEXT: vwabda.vv v8, v12, v14
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
- ret <vscale x 8 x i32> %res
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwabdacc.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
-
-define <vscale x 16 x i32> @vwabdacc_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
-; RV32-LABEL: vwabdacc_vv_i16m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m4, tu, ma
-; RV32-NEXT: vwabda.vv v8, v16, v20
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdacc_vv_i16m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m4, tu, ma
-; RV64-NEXT: vwabda.vv v8, v16, v20
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
- ret <vscale x 16 x i32> %res
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwabdacc.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
-
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
deleted file mode 100644
index 54c5e0a1b32a6..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
+++ /dev/null
@@ -1,202 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
-
-define <vscale x 1 x i16> @vwabdaccu_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
-; RV32-LABEL: vwabdaccu_vv_i8mf8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i8mf8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 1 x i16> %res
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vwabdaccu.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
-
-define <vscale x 2 x i16> @vwabdaccu_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
-; RV32-LABEL: vwabdaccu_vv_i8mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i8mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 2 x i16> %res
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vwabdaccu.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
-
-define <vscale x 4 x i16> @vwabdaccu_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
-; RV32-LABEL: vwabdaccu_vv_i8mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i8mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 4 x i16> %res
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vwabdaccu.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
-
-define <vscale x 8 x i16> @vwabdaccu_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
-; RV32-LABEL: vwabdaccu_vv_i8m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v10, v11
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i8m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v10, v11
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 8 x i16> %res
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vwabdaccu.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
-
-define <vscale x 16 x i16> @vwabdaccu_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; RV32-LABEL: vwabdaccu_vv_i8m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m2, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v12, v14
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i8m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m2, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v12, v14
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 16 x i16> %res
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vwabdaccu.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
-
-define <vscale x 32 x i16> @vwabdaccu_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
-; RV32-LABEL: vwabdaccu_vv_i8m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m4, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v16, v20
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i8m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m4, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v16, v20
-; RV64-NEXT: ret
- %res = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
- ret <vscale x 32 x i16> %res
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vwabdaccu.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
-
-define <vscale x 1 x i32> @vwabdaccu_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
-; RV32-LABEL: vwabdaccu_vv_i16mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i16mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
- ret <vscale x 1 x i32> %res
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vwabdaccu.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
-
-define <vscale x 2 x i32> @vwabdaccu_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
-; RV32-LABEL: vwabdaccu_vv_i16mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i16mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
- ret <vscale x 2 x i32> %res
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vwabdaccu.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
-
-define <vscale x 4 x i32> @vwabdaccu_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
-; RV32-LABEL: vwabdaccu_vv_i16m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v10, v11
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i16m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v10, v11
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
- ret <vscale x 4 x i32> %res
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vwabdaccu.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
-
-define <vscale x 8 x i32> @vwabdaccu_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; RV32-LABEL: vwabdaccu_vv_i16m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v12, v14
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i16m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v12, v14
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
- ret <vscale x 8 x i32> %res
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vwabdaccu.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
-
-define <vscale x 16 x i32> @vwabdaccu_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
-; RV32-LABEL: vwabdaccu_vv_i16m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m4, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v16, v20
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdaccu_vv_i16m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m4, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v16, v20
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
- ret <vscale x 16 x i32> %res
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vwabdaccu.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
-
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabdau.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdau.ll
new file mode 100644
index 0000000000000..42c3f0cfa1306
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabdau.ll
@@ -0,0 +1,179 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i16> @vwabdau_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vwabdau_vv_i8mf8:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i8mf8:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vwabdau(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 2 x i16> @vwabdau_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vwabdau_vv_i8mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i8mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vwabdau(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vwabdau_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vwabdau_vv_i8mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i8mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vwabdau(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 8 x i16> @vwabdau_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vwabdau_vv_i8m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i8m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vwabdau(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i16> @vwabdau_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vwabdau_vv_i8m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i8m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vwabdau(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 32 x i16> @vwabdau_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vwabdau_vv_i8m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i8m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e8, m4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vwabdau(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
+ ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 1 x i32> @vwabdau_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vwabdau_vv_i16mf4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i16mf4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vwabdau(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 2 x i32> @vwabdau_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vwabdau_vv_i16mf2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v9, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i16mf2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v9, v10
+; RV64-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vwabdau(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 4 x i32> @vwabdau_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vwabdau_vv_i16m1:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v10, v11
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i16m1:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v10, v11
+; RV64-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vwabdau(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 8 x i32> @vwabdau_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vwabdau_vv_i16m2:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v12, v14
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i16m2:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v12, v14
+; RV64-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vwabdau(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 16 x i32> @vwabdau_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vwabdau_vv_i16m4:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV32-NEXT: vwabdau.vv v8, v16, v20
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vwabdau_vv_i16m4:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e16, m4, tu, ma
+; RV64-NEXT: vwabdau.vv v8, v16, v20
+; RV64-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vwabdau(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
+ ret <vscale x 16 x i32> %res
+}
>From 3fd8e21e729e2eb3964ccd533de37aca3dad10cf Mon Sep 17 00:00:00 2001
From: Zhenxuan Sang <sang at bytedance.com>
Date: Thu, 12 Feb 2026 19:44:32 +0800
Subject: [PATCH 3/4] Use RVVOutBuiltinSet for vabd/vabdu to match Intrinsics
Created using spr 1.3.6-beta.1
---
clang/include/clang/Basic/riscv_vector.td | 4 +--
.../zvabd/non-policy/non-overloaded/vabd_vv.c | 26 +++++++++----------
.../non-policy/non-overloaded/vabdu_vv.c | 26 +++++++++----------
.../zvabd/non-policy/overloaded/vabd_vv.c | 26 +++++++++----------
.../zvabd/non-policy/overloaded/vabdu_vv.c | 26 +++++++++----------
.../zvabd/policy/non-overloaded/vabd_vv.c | 26 +++++++++----------
.../zvabd/policy/non-overloaded/vabdu_vv.c | 26 +++++++++----------
.../zvabd/policy/overloaded/vabd_vv.c | 26 +++++++++----------
.../zvabd/policy/overloaded/vabdu_vv.c | 26 +++++++++----------
9 files changed, 106 insertions(+), 106 deletions(-)
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 25bc401db2068..f65539e140088 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2047,8 +2047,8 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
// zvabd
let RequiredFeatures = ["zvabd"] in {
defm vabs : RVVOutBuiltinSet<"vabs", "csil", [["v", "Uv", "Uvv"]]>;
- defm vabd : RVVOutOp1BuiltinSet<"vabd", "cs", [["vv", "Uv", "Uvvv"]]>;
- defm vabdu : RVVOutOp1BuiltinSet<"vabdu", "cs", [["vv", "Uv", "UvUvUv"]]>;
+ defm vabd : RVVOutBuiltinSet<"vabd", "cs", [["vv", "Uv", "Uvvv"]]>;
+ defm vabdu : RVVOutBuiltinSet<"vabdu", "cs", [["vv", "Uv", "UvUvUv"]]>;
}
}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
index 4c844315ba231..cd118c3a851dd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vabd_vv_u8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
@@ -20,7 +20,7 @@ vuint8mf8_t test_vabd_vv_u8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vabd_vv_u8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
@@ -30,7 +30,7 @@ vuint8mf4_t test_vabd_vv_u8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vabd_vv_u8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
@@ -40,7 +40,7 @@ vuint8mf2_t test_vabd_vv_u8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vabd_vv_u8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
@@ -50,7 +50,7 @@ vuint8m1_t test_vabd_vv_u8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vabd_vv_u8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
@@ -60,7 +60,7 @@ vuint8m2_t test_vabd_vv_u8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vabd_vv_u8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
@@ -70,7 +70,7 @@ vuint8m4_t test_vabd_vv_u8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vabd_vv_u8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
@@ -80,7 +80,7 @@ vuint8m8_t test_vabd_vv_u8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vabd_vv_u16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
@@ -90,7 +90,7 @@ vuint16mf4_t test_vabd_vv_u16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vabd_vv_u16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
@@ -100,7 +100,7 @@ vuint16mf2_t test_vabd_vv_u16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vabd_vv_u16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
@@ -110,7 +110,7 @@ vuint16m1_t test_vabd_vv_u16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vabd_vv_u16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
@@ -120,7 +120,7 @@ vuint16m2_t test_vabd_vv_u16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vabd_vv_u16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
@@ -130,7 +130,7 @@ vuint16m4_t test_vabd_vv_u16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vabd_vv_u16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
index 5583fed5f7995..24f112e16c55e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -20,7 +20,7 @@ vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -30,7 +30,7 @@ vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -40,7 +40,7 @@ vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -50,7 +50,7 @@ vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -60,7 +60,7 @@ vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -70,7 +70,7 @@ vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
@@ -80,7 +80,7 @@ vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -90,7 +90,7 @@ vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -100,7 +100,7 @@ vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -110,7 +110,7 @@ vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -120,7 +120,7 @@ vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -130,7 +130,7 @@ vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
index 32f51b151ba76..6ed6d7ccbe61d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vabd_vv_u8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
@@ -20,7 +20,7 @@ vuint8mf8_t test_vabd_vv_u8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vabd_vv_u8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
@@ -30,7 +30,7 @@ vuint8mf4_t test_vabd_vv_u8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vabd_vv_u8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
@@ -40,7 +40,7 @@ vuint8mf2_t test_vabd_vv_u8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vabd_vv_u8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
@@ -50,7 +50,7 @@ vuint8m1_t test_vabd_vv_u8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vabd_vv_u8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
@@ -60,7 +60,7 @@ vuint8m2_t test_vabd_vv_u8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vabd_vv_u8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
@@ -70,7 +70,7 @@ vuint8m4_t test_vabd_vv_u8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vabd_vv_u8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
@@ -80,7 +80,7 @@ vuint8m8_t test_vabd_vv_u8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vabd_vv_u16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
@@ -90,7 +90,7 @@ vuint16mf4_t test_vabd_vv_u16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vabd_vv_u16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
@@ -100,7 +100,7 @@ vuint16mf2_t test_vabd_vv_u16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vabd_vv_u16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
@@ -110,7 +110,7 @@ vuint16m1_t test_vabd_vv_u16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vabd_vv_u16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
@@ -120,7 +120,7 @@ vuint16m2_t test_vabd_vv_u16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vabd_vv_u16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
@@ -130,7 +130,7 @@ vuint16m4_t test_vabd_vv_u16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vabd_vv_u16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
index e649ede420f27..45ac5c87ac3e7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -20,7 +20,7 @@ vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -30,7 +30,7 @@ vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -40,7 +40,7 @@ vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -50,7 +50,7 @@ vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -60,7 +60,7 @@ vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -70,7 +70,7 @@ vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
@@ -80,7 +80,7 @@ vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -90,7 +90,7 @@ vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -100,7 +100,7 @@ vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl)
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -110,7 +110,7 @@ vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -120,7 +120,7 @@ vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -130,7 +130,7 @@ vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
index 0200da53b2b1e..35a17dbd6e3cc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vabd_vv_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
@@ -20,7 +20,7 @@ vuint8mf8_t test_vabd_vv_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vabd_vv_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
@@ -30,7 +30,7 @@ vuint8mf4_t test_vabd_vv_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vabd_vv_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
@@ -40,7 +40,7 @@ vuint8mf2_t test_vabd_vv_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vabd_vv_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
@@ -50,7 +50,7 @@ vuint8m1_t test_vabd_vv_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, siz
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vabd_vv_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
@@ -60,7 +60,7 @@ vuint8m2_t test_vabd_vv_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, siz
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vabd_vv_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
@@ -70,7 +70,7 @@ vuint8m4_t test_vabd_vv_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, siz
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vabd_vv_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
@@ -80,7 +80,7 @@ vuint8m8_t test_vabd_vv_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, siz
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vabd_vv_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
@@ -90,7 +90,7 @@ vuint16mf4_t test_vabd_vv_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2, vint16mf4_
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vabd_vv_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
@@ -100,7 +100,7 @@ vuint16mf2_t test_vabd_vv_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2, vint16mf2_
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vabd_vv_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
@@ -110,7 +110,7 @@ vuint16m1_t test_vabd_vv_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vabd_vv_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
@@ -120,7 +120,7 @@ vuint16m2_t test_vabd_vv_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vabd_vv_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
@@ -130,7 +130,7 @@ vuint16m4_t test_vabd_vv_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vabd_vv_u16m8_tu(vuint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
index 2f34320c03290..e0abe281fd4d6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -20,7 +20,7 @@ vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -30,7 +30,7 @@ vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -40,7 +40,7 @@ vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -50,7 +50,7 @@ vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -60,7 +60,7 @@ vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -70,7 +70,7 @@ vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
@@ -80,7 +80,7 @@ vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -90,7 +90,7 @@ vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16m
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -100,7 +100,7 @@ vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16m
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -110,7 +110,7 @@ vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -120,7 +120,7 @@ vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -130,7 +130,7 @@ vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
index d2e24c4807ba1..1e0e8f5cb2d2a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vabd_vv_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
@@ -20,7 +20,7 @@ vuint8mf8_t test_vabd_vv_u8mf8_tu(vuint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vabd_vv_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
@@ -30,7 +30,7 @@ vuint8mf4_t test_vabd_vv_u8mf4_tu(vuint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vabd_vv_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
@@ -40,7 +40,7 @@ vuint8mf2_t test_vabd_vv_u8mf2_tu(vuint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vabd_vv_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
@@ -50,7 +50,7 @@ vuint8m1_t test_vabd_vv_u8m1_tu(vuint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, siz
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vabd_vv_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
@@ -60,7 +60,7 @@ vuint8m2_t test_vabd_vv_u8m2_tu(vuint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, siz
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vabd_vv_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
@@ -70,7 +70,7 @@ vuint8m4_t test_vabd_vv_u8m4_tu(vuint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, siz
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vabd_vv_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
@@ -80,7 +80,7 @@ vuint8m8_t test_vabd_vv_u8m8_tu(vuint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, siz
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vabd_vv_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
@@ -90,7 +90,7 @@ vuint16mf4_t test_vabd_vv_u16mf4_tu(vuint16mf4_t vd, vint16mf4_t vs2, vint16mf4_
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vabd_vv_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
@@ -100,7 +100,7 @@ vuint16mf2_t test_vabd_vv_u16mf2_tu(vuint16mf2_t vd, vint16mf2_t vs2, vint16mf2_
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vabd_vv_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
@@ -110,7 +110,7 @@ vuint16m1_t test_vabd_vv_u16m1_tu(vuint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vabd_vv_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
@@ -120,7 +120,7 @@ vuint16m2_t test_vabd_vv_u16m2_tu(vuint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vabd_vv_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
@@ -130,7 +130,7 @@ vuint16m4_t test_vabd_vv_u16m4_tu(vuint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vabd_vv_u16m8_tu(vuint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
index a76f623b4b6d2..aac1204795010 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
@@ -10,7 +10,7 @@
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tu
// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
@@ -20,7 +20,7 @@ vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tu
// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
@@ -30,7 +30,7 @@ vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
@@ -40,7 +40,7 @@ vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tu
// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
@@ -50,7 +50,7 @@ vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tu
// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
@@ -60,7 +60,7 @@ vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tu
// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
@@ -70,7 +70,7 @@ vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tu
// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
@@ -80,7 +80,7 @@ vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
@@ -90,7 +90,7 @@ vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16m
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
@@ -100,7 +100,7 @@ vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16m
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
@@ -110,7 +110,7 @@ vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
@@ -120,7 +120,7 @@ vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
@@ -130,7 +130,7 @@ vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t
// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
>From cbb978f5b746dac1e2cb30d8914cd926e13c3266 Mon Sep 17 00:00:00 2001
From: Zhenxuan Sang <sang at bytedance.com>
Date: Fri, 13 Feb 2026 12:50:42 +0800
Subject: [PATCH 4/4] Add masked .ll tests
Created using spr 1.3.6-beta.1
---
llvm/test/CodeGen/RISCV/rvv/vabd.ll | 468 +++++++++------
llvm/test/CodeGen/RISCV/rvv/vabdu.ll | 468 +++++++++------
llvm/test/CodeGen/RISCV/rvv/vabs.ll | 759 +++++++++++++++----------
llvm/test/CodeGen/RISCV/rvv/vwabda.ll | 396 ++++++++-----
llvm/test/CodeGen/RISCV/rvv/vwabdau.ll | 396 ++++++++-----
5 files changed, 1562 insertions(+), 925 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabd.ll b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
index 9ff66ec463938..f3750e7f4513d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vabd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
@@ -1,211 +1,335 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
-
-define <vscale x 1 x i8> @vabd_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
-; RV32-LABEL: vabd_vv_i8mf8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i8mf8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i8> @llvm.riscv.vabd(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @vabd_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabd(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl)
ret <vscale x 1 x i8> %res
}
-define <vscale x 2 x i8> @vabd_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
-; RV32-LABEL: vabd_vv_i8mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i8mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i8> @llvm.riscv.vabd(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+define <vscale x 1 x i8> @vabd_vv_mask_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabd.mask(
+ <vscale x 1 x i8> %passthru,
+ <vscale x 1 x i8> %a,
+ <vscale x 1 x i8> %b,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 1 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabd_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabd(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl)
+ ret <vscale x 2 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabd_vv_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabd.mask(
+ <vscale x 2 x i8> %passthru,
+ <vscale x 2 x i8> %a,
+ <vscale x 2 x i8> %b,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 2 x i8> %res
}
-define <vscale x 4 x i8> @vabd_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
-; RV32-LABEL: vabd_vv_i8mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i8mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i8> @llvm.riscv.vabd(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+define <vscale x 4 x i8> @vabd_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabd(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl)
+ ret <vscale x 4 x i8> %res
+}
+
+define <vscale x 4 x i8> @vabd_vv_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabd.mask(
+ <vscale x 4 x i8> %passthru,
+ <vscale x 4 x i8> %a,
+ <vscale x 4 x i8> %b,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 4 x i8> %res
}
-define <vscale x 8 x i8> @vabd_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
-; RV32-LABEL: vabd_vv_i8m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i8m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i8> @llvm.riscv.vabd(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+define <vscale x 8 x i8> @vabd_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabd(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl)
ret <vscale x 8 x i8> %res
}
-define <vscale x 16 x i8> @vabd_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; RV32-LABEL: vabd_vv_i8m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i8m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v10
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i8> @llvm.riscv.vabd(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+define <vscale x 8 x i8> @vabd_vv_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabd.mask(
+ <vscale x 8 x i8> %passthru,
+ <vscale x 8 x i8> %a,
+ <vscale x 8 x i8> %b,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 8 x i8> %res
+}
+
+define <vscale x 16 x i8> @vabd_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabd(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl)
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @vabd_vv_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vabd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabd.mask(
+ <vscale x 16 x i8> %passthru,
+ <vscale x 16 x i8> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 16 x i8> %res
}
-define <vscale x 32 x i8> @vabd_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
-; RV32-LABEL: vabd_vv_i8m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v12
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i8m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v12
-; RV64-NEXT: ret
- %res = call <vscale x 32 x i8> @llvm.riscv.vabd(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+define <vscale x 32 x i8> @vabd_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabd(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl)
ret <vscale x 32 x i8> %res
}
-define <vscale x 64 x i8> @vabd_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
-; RV32-LABEL: vabd_vv_i8m8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v16
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i8m8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v16
-; RV64-NEXT: ret
- %res = call <vscale x 64 x i8> @llvm.riscv.vabd(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+define <vscale x 32 x i8> @vabd_vv_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vabd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabd.mask(
+ <vscale x 32 x i8> %passthru,
+ <vscale x 32 x i8> %a,
+ <vscale x 32 x i8> %b,
+ <vscale x 32 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabd_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i8m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabd(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen %vl)
+ ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabd_vv_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i8m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vabd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabd.mask(
+ <vscale x 64 x i8> %passthru,
+ <vscale x 64 x i8> %a,
+ <vscale x 64 x i8> %b,
+ <vscale x 64 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 64 x i8> %res
}
-define <vscale x 1 x i16> @vabd_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
-; RV32-LABEL: vabd_vv_i16mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i16mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i16> @llvm.riscv.vabd(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+define <vscale x 1 x i16> @vabd_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabd(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl)
+ ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 1 x i16> @vabd_vv_mask_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabd.mask(
+ <vscale x 1 x i16> %passthru,
+ <vscale x 1 x i16> %a,
+ <vscale x 1 x i16> %b,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 1 x i16> %res
}
-define <vscale x 2 x i16> @vabd_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
-; RV32-LABEL: vabd_vv_i16mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i16mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i16> @llvm.riscv.vabd(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+define <vscale x 2 x i16> @vabd_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabd(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl)
ret <vscale x 2 x i16> %res
}
-define <vscale x 4 x i16> @vabd_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
-; RV32-LABEL: vabd_vv_i16m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i16m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i16> @llvm.riscv.vabd(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+define <vscale x 2 x i16> @vabd_vv_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabd.mask(
+ <vscale x 2 x i16> %passthru,
+ <vscale x 2 x i16> %a,
+ <vscale x 2 x i16> %b,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabd_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabd(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl)
+ ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabd_vv_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vabd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabd.mask(
+ <vscale x 4 x i16> %passthru,
+ <vscale x 4 x i16> %a,
+ <vscale x 4 x i16> %b,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 4 x i16> %res
}
-define <vscale x 8 x i16> @vabd_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; RV32-LABEL: vabd_vv_i16m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i16m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v10
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i16> @llvm.riscv.vabd(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+define <vscale x 8 x i16> @vabd_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabd(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl)
ret <vscale x 8 x i16> %res
}
-define <vscale x 16 x i16> @vabd_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
-; RV32-LABEL: vabd_vv_i16m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v12
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i16m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v12
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i16> @llvm.riscv.vabd(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
+define <vscale x 8 x i16> @vabd_vv_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vabd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabd.mask(
+ <vscale x 8 x i16> %passthru,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i16> @vabd_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabd(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl)
+ ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 16 x i16> @vabd_vv_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vabd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabd.mask(
+ <vscale x 16 x i16> %passthru,
+ <vscale x 16 x i16> %a,
+ <vscale x 16 x i16> %b,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 16 x i16> %res
}
-define <vscale x 32 x i16> @vabd_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
-; RV32-LABEL: vabd_vv_i16m8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; RV32-NEXT: vabd.vv v8, v8, v16
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabd_vv_i16m8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; RV64-NEXT: vabd.vv v8, v8, v16
-; RV64-NEXT: ret
- %res = call <vscale x 32 x i16> @llvm.riscv.vabd(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
+define <vscale x 32 x i16> @vabd_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_i16m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vabd.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabd(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen %vl)
+ ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x i16> @vabd_vv_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabd_vv_mask_i16m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vabd.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabd.mask(
+ <vscale x 32 x i16> %passthru,
+ <vscale x 32 x i16> %a,
+ <vscale x 32 x i16> %b,
+ <vscale x 32 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 32 x i16> %res
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabdu.ll b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
index acd7502dbc345..e629e41b61172 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
@@ -1,211 +1,335 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
-
-define <vscale x 1 x i8> @vabdu_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
-; RV32-LABEL: vabdu_vv_i8mf8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i8mf8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i8> @llvm.riscv.vabdu(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @vabdu_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabdu(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl)
ret <vscale x 1 x i8> %res
}
-define <vscale x 2 x i8> @vabdu_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
-; RV32-LABEL: vabdu_vv_i8mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i8mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i8> @llvm.riscv.vabdu(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+define <vscale x 1 x i8> @vabdu_vv_mask_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabdu.mask(
+ <vscale x 1 x i8> %passthru,
+ <vscale x 1 x i8> %a,
+ <vscale x 1 x i8> %b,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 1 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabdu_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabdu(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl)
+ ret <vscale x 2 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabdu_vv_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabdu.mask(
+ <vscale x 2 x i8> %passthru,
+ <vscale x 2 x i8> %a,
+ <vscale x 2 x i8> %b,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 2 x i8> %res
}
-define <vscale x 4 x i8> @vabdu_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
-; RV32-LABEL: vabdu_vv_i8mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i8mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i8> @llvm.riscv.vabdu(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+define <vscale x 4 x i8> @vabdu_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabdu(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl)
+ ret <vscale x 4 x i8> %res
+}
+
+define <vscale x 4 x i8> @vabdu_vv_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabdu.mask(
+ <vscale x 4 x i8> %passthru,
+ <vscale x 4 x i8> %a,
+ <vscale x 4 x i8> %b,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 4 x i8> %res
}
-define <vscale x 8 x i8> @vabdu_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
-; RV32-LABEL: vabdu_vv_i8m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i8m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i8> @llvm.riscv.vabdu(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+define <vscale x 8 x i8> @vabdu_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabdu(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl)
ret <vscale x 8 x i8> %res
}
-define <vscale x 16 x i8> @vabdu_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; RV32-LABEL: vabdu_vv_i8m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i8m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v10
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i8> @llvm.riscv.vabdu(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+define <vscale x 8 x i8> @vabdu_vv_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabdu.mask(
+ <vscale x 8 x i8> %passthru,
+ <vscale x 8 x i8> %a,
+ <vscale x 8 x i8> %b,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 8 x i8> %res
+}
+
+define <vscale x 16 x i8> @vabdu_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabdu(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl)
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @vabdu_vv_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabdu.mask(
+ <vscale x 16 x i8> %passthru,
+ <vscale x 16 x i8> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 16 x i8> %res
}
-define <vscale x 32 x i8> @vabdu_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
-; RV32-LABEL: vabdu_vv_i8m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v12
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i8m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v12
-; RV64-NEXT: ret
- %res = call <vscale x 32 x i8> @llvm.riscv.vabdu(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+define <vscale x 32 x i8> @vabdu_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabdu(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl)
ret <vscale x 32 x i8> %res
}
-define <vscale x 64 x i8> @vabdu_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
-; RV32-LABEL: vabdu_vv_i8m8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v16
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i8m8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v16
-; RV64-NEXT: ret
- %res = call <vscale x 64 x i8> @llvm.riscv.vabdu(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+define <vscale x 32 x i8> @vabdu_vv_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabdu.mask(
+ <vscale x 32 x i8> %passthru,
+ <vscale x 32 x i8> %a,
+ <vscale x 32 x i8> %b,
+ <vscale x 32 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabdu_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i8m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabdu(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen %vl)
+ ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabdu_vv_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i8m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl8r.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask(
+ <vscale x 64 x i8> %passthru,
+ <vscale x 64 x i8> %a,
+ <vscale x 64 x i8> %b,
+ <vscale x 64 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 64 x i8> %res
}
-define <vscale x 1 x i16> @vabdu_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
-; RV32-LABEL: vabdu_vv_i16mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i16mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i16> @llvm.riscv.vabdu(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+define <vscale x 1 x i16> @vabdu_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabdu(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl)
+ ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 1 x i16> @vabdu_vv_mask_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabdu.mask(
+ <vscale x 1 x i16> %passthru,
+ <vscale x 1 x i16> %a,
+ <vscale x 1 x i16> %b,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 1 x i16> %res
}
-define <vscale x 2 x i16> @vabdu_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
-; RV32-LABEL: vabdu_vv_i16mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i16mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i16> @llvm.riscv.vabdu(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+define <vscale x 2 x i16> @vabdu_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabdu(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl)
ret <vscale x 2 x i16> %res
}
-define <vscale x 4 x i16> @vabdu_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
-; RV32-LABEL: vabdu_vv_i16m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v9
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i16m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v9
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i16> @llvm.riscv.vabdu(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+define <vscale x 2 x i16> @vabdu_vv_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabdu.mask(
+ <vscale x 2 x i16> %passthru,
+ <vscale x 2 x i16> %a,
+ <vscale x 2 x i16> %b,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabdu_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabdu(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl)
+ ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabdu_vv_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabdu.mask(
+ <vscale x 4 x i16> %passthru,
+ <vscale x 4 x i16> %a,
+ <vscale x 4 x i16> %b,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 4 x i16> %res
}
-define <vscale x 8 x i16> @vabdu_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; RV32-LABEL: vabdu_vv_i16m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i16m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v10
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i16> @llvm.riscv.vabdu(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+define <vscale x 8 x i16> @vabdu_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabdu(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl)
ret <vscale x 8 x i16> %res
}
-define <vscale x 16 x i16> @vabdu_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
-; RV32-LABEL: vabdu_vv_i16m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v12
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i16m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v12
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i16> @llvm.riscv.vabdu(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
+define <vscale x 8 x i16> @vabdu_vv_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabdu.mask(
+ <vscale x 8 x i16> %passthru,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 16 x i16> @vabdu_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabdu(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl)
+ ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 16 x i16> @vabdu_vv_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabdu.mask(
+ <vscale x 16 x i16> %passthru,
+ <vscale x 16 x i16> %a,
+ <vscale x 16 x i16> %b,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 16 x i16> %res
}
-define <vscale x 32 x i16> @vabdu_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
-; RV32-LABEL: vabdu_vv_i16m8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; RV32-NEXT: vabdu.vv v8, v8, v16
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabdu_vv_i16m8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; RV64-NEXT: vabdu.vv v8, v8, v16
-; RV64-NEXT: ret
- %res = call <vscale x 32 x i16> @llvm.riscv.vabdu(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
+define <vscale x 32 x i16> @vabdu_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_i16m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vabdu.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabdu(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen %vl)
+ ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x i16> @vabdu_vv_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabdu_vv_mask_i16m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl8re16.v v24, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vabdu.vv v8, v16, v24, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask(
+ <vscale x 32 x i16> %passthru,
+ <vscale x 32 x i16> %a,
+ <vscale x 32 x i16> %b,
+ <vscale x 32 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 32 x i16> %res
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabs.ll b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
index 0bb68bdcb2f5c..db1f6088971a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vabs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
@@ -1,355 +1,536 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
-
-define <vscale x 1 x i8> @vabs_v_i8mf8(<vscale x 1 x i8> %a) {
-; RV32-LABEL: vabs_v_i8mf8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i8mf8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i8> @llvm.riscv.vabs(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, iXLen -1)
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @vabs_v_i8mf8(<vscale x 1 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabs(<vscale x 1 x i8> poison, <vscale x 1 x i8> %v, iXLen %vl)
ret <vscale x 1 x i8> %res
}
-define <vscale x 2 x i8> @vabs_v_i8mf4(<vscale x 2 x i8> %a) {
-; RV32-LABEL: vabs_v_i8mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i8mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i8> @llvm.riscv.vabs(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, iXLen -1)
+define <vscale x 1 x i8> @vabs_v_mask_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %v, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vabs.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i8> @llvm.riscv.vabs.mask(
+ <vscale x 1 x i8> %passthru,
+ <vscale x 1 x i8> %v,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 1 x i8> %res
+}
+
+define <vscale x 2 x i8> @vabs_v_i8mf4(<vscale x 2 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabs(<vscale x 2 x i8> poison, <vscale x 2 x i8> %v, iXLen %vl)
ret <vscale x 2 x i8> %res
}
-define <vscale x 4 x i8> @vabs_v_i8mf2(<vscale x 4 x i8> %a) {
-; RV32-LABEL: vabs_v_i8mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i8mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i8> @llvm.riscv.vabs(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, iXLen -1)
+define <vscale x 2 x i8> @vabs_v_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %v, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vabs.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i8> @llvm.riscv.vabs.mask(
+ <vscale x 2 x i8> %passthru,
+ <vscale x 2 x i8> %v,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 2 x i8> %res
+}
+
+define <vscale x 4 x i8> @vabs_v_i8mf2(<vscale x 4 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabs(<vscale x 4 x i8> poison, <vscale x 4 x i8> %v, iXLen %vl)
ret <vscale x 4 x i8> %res
}
-define <vscale x 8 x i8> @vabs_v_i8m1(<vscale x 8 x i8> %a) {
-; RV32-LABEL: vabs_v_i8m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i8m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i8> @llvm.riscv.vabs(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, iXLen -1)
+define <vscale x 4 x i8> @vabs_v_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %v, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vabs.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i8> @llvm.riscv.vabs.mask(
+ <vscale x 4 x i8> %passthru,
+ <vscale x 4 x i8> %v,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 4 x i8> %res
+}
+
+define <vscale x 8 x i8> @vabs_v_i8m1(<vscale x 8 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabs(<vscale x 8 x i8> poison, <vscale x 8 x i8> %v, iXLen %vl)
+ ret <vscale x 8 x i8> %res
+}
+
+define <vscale x 8 x i8> @vabs_v_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %v, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vabs.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i8> @llvm.riscv.vabs.mask(
+ <vscale x 8 x i8> %passthru,
+ <vscale x 8 x i8> %v,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 8 x i8> %res
}
-define <vscale x 16 x i8> @vabs_v_i8m2(<vscale x 16 x i8> %a) {
-; RV32-LABEL: vabs_v_i8m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i8m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i8> @llvm.riscv.vabs(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, iXLen -1)
+define <vscale x 16 x i8> @vabs_v_i8m2(<vscale x 16 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabs(<vscale x 16 x i8> poison, <vscale x 16 x i8> %v, iXLen %vl)
ret <vscale x 16 x i8> %res
}
-define <vscale x 32 x i8> @vabs_v_i8m4(<vscale x 32 x i8> %a) {
-; RV32-LABEL: vabs_v_i8m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i8m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 32 x i8> @llvm.riscv.vabs(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, iXLen -1)
+define <vscale x 16 x i8> @vabs_v_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %v, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vabs.v v8, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i8> @llvm.riscv.vabs.mask(
+ <vscale x 16 x i8> %passthru,
+ <vscale x 16 x i8> %v,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 32 x i8> @vabs_v_i8m4(<vscale x 32 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabs(<vscale x 32 x i8> poison, <vscale x 32 x i8> %v, iXLen %vl)
+ ret <vscale x 32 x i8> %res
+}
+
+define <vscale x 32 x i8> @vabs_v_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %v, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vabs.v v8, v12, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i8> @llvm.riscv.vabs.mask(
+ <vscale x 32 x i8> %passthru,
+ <vscale x 32 x i8> %v,
+ <vscale x 32 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 32 x i8> %res
}
-define <vscale x 64 x i8> @vabs_v_i8m8(<vscale x 64 x i8> %a) {
-; RV32-LABEL: vabs_v_i8m8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i8m8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 64 x i8> @llvm.riscv.vabs(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, iXLen -1)
+define <vscale x 64 x i8> @vabs_v_i8m8(<vscale x 64 x i8> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i8m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabs(<vscale x 64 x i8> poison, <vscale x 64 x i8> %v, iXLen %vl)
+ ret <vscale x 64 x i8> %res
+}
+
+define <vscale x 64 x i8> @vabs_v_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %v, <vscale x 64 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i8m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vabs.v v8, v16, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 64 x i8> @llvm.riscv.vabs.mask(
+ <vscale x 64 x i8> %passthru,
+ <vscale x 64 x i8> %v,
+ <vscale x 64 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 64 x i8> %res
}
-define <vscale x 1 x i16> @vabs_v_i16mf4(<vscale x 1 x i16> %a) {
-; RV32-LABEL: vabs_v_i16mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i16mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i16> @llvm.riscv.vabs(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, iXLen -1)
+define <vscale x 1 x i16> @vabs_v_i16mf4(<vscale x 1 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabs(<vscale x 1 x i16> poison, <vscale x 1 x i16> %v, iXLen %vl)
ret <vscale x 1 x i16> %res
}
-define <vscale x 2 x i16> @vabs_v_i16mf2(<vscale x 2 x i16> %a) {
-; RV32-LABEL: vabs_v_i16mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i16mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i16> @llvm.riscv.vabs(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, iXLen -1)
+define <vscale x 1 x i16> @vabs_v_mask_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %v, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vabs.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vabs.mask(
+ <vscale x 1 x i16> %passthru,
+ <vscale x 1 x i16> %v,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 2 x i16> @vabs_v_i16mf2(<vscale x 2 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabs(<vscale x 2 x i16> poison, <vscale x 2 x i16> %v, iXLen %vl)
ret <vscale x 2 x i16> %res
}
-define <vscale x 4 x i16> @vabs_v_i16m1(<vscale x 4 x i16> %a) {
-; RV32-LABEL: vabs_v_i16m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i16m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i16> @llvm.riscv.vabs(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, iXLen -1)
+define <vscale x 2 x i16> @vabs_v_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %v, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vabs.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vabs.mask(
+ <vscale x 2 x i16> %passthru,
+ <vscale x 2 x i16> %v,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabs_v_i16m1(<vscale x 4 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabs(<vscale x 4 x i16> poison, <vscale x 4 x i16> %v, iXLen %vl)
+ ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 4 x i16> @vabs_v_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %v, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vabs.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vabs.mask(
+ <vscale x 4 x i16> %passthru,
+ <vscale x 4 x i16> %v,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 4 x i16> %res
}
-define <vscale x 8 x i16> @vabs_v_i16m2(<vscale x 8 x i16> %a) {
-; RV32-LABEL: vabs_v_i16m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i16m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i16> @llvm.riscv.vabs(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, iXLen -1)
+define <vscale x 8 x i16> @vabs_v_i16m2(<vscale x 8 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabs(<vscale x 8 x i16> poison, <vscale x 8 x i16> %v, iXLen %vl)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @vabs_v_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %v, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vabs.v v8, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vabs.mask(
+ <vscale x 8 x i16> %passthru,
+ <vscale x 8 x i16> %v,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 8 x i16> %res
}
-define <vscale x 16 x i16> @vabs_v_i16m4(<vscale x 16 x i16> %a) {
-; RV32-LABEL: vabs_v_i16m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i16m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i16> @llvm.riscv.vabs(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, iXLen -1)
+define <vscale x 16 x i16> @vabs_v_i16m4(<vscale x 16 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabs(<vscale x 16 x i16> poison, <vscale x 16 x i16> %v, iXLen %vl)
ret <vscale x 16 x i16> %res
}
-define <vscale x 32 x i16> @vabs_v_i16m8(<vscale x 32 x i16> %a) {
-; RV32-LABEL: vabs_v_i16m8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i16m8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 32 x i16> @llvm.riscv.vabs(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, iXLen -1)
+define <vscale x 16 x i16> @vabs_v_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %v, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vabs.v v8, v12, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vabs.mask(
+ <vscale x 16 x i16> %passthru,
+ <vscale x 16 x i16> %v,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 32 x i16> @vabs_v_i16m8(<vscale x 32 x i16> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i16m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabs(<vscale x 32 x i16> poison, <vscale x 32 x i16> %v, iXLen %vl)
ret <vscale x 32 x i16> %res
}
-define <vscale x 1 x i32> @vabs_v_i32mf2(<vscale x 1 x i32> %a) {
-; RV32-LABEL: vabs_v_i32mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i32mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i32> @llvm.riscv.vabs(<vscale x 1 x i32> poison, <vscale x 1 x i32> %a, iXLen -1)
+define <vscale x 32 x i16> @vabs_v_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %v, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i16m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vabs.v v8, v16, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vabs.mask(
+ <vscale x 32 x i16> %passthru,
+ <vscale x 32 x i16> %v,
+ <vscale x 32 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 1 x i32> @vabs_v_i32mf2(<vscale x 1 x i32> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i32mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vabs(<vscale x 1 x i32> poison, <vscale x 1 x i32> %v, iXLen %vl)
+ ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 1 x i32> @vabs_v_mask_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 1 x i32> %v, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i32mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vabs.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vabs.mask(
+ <vscale x 1 x i32> %passthru,
+ <vscale x 1 x i32> %v,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 1 x i32> %res
}
-define <vscale x 2 x i32> @vabs_v_i32m1(<vscale x 2 x i32> %a) {
-; RV32-LABEL: vabs_v_i32m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i32m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i32> @llvm.riscv.vabs(<vscale x 2 x i32> poison, <vscale x 2 x i32> %a, iXLen -1)
+define <vscale x 2 x i32> @vabs_v_i32m1(<vscale x 2 x i32> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i32m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vabs(<vscale x 2 x i32> poison, <vscale x 2 x i32> %v, iXLen %vl)
ret <vscale x 2 x i32> %res
}
-define <vscale x 4 x i32> @vabs_v_i32m2(<vscale x 4 x i32> %a) {
-; RV32-LABEL: vabs_v_i32m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i32m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i32> @llvm.riscv.vabs(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1)
+define <vscale x 2 x i32> @vabs_v_mask_i32m1(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %v, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i32m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vabs.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vabs.mask(
+ <vscale x 2 x i32> %passthru,
+ <vscale x 2 x i32> %v,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 4 x i32> @vabs_v_i32m2(<vscale x 4 x i32> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i32m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vabs(<vscale x 4 x i32> poison, <vscale x 4 x i32> %v, iXLen %vl)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @vabs_v_mask_i32m2(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i32m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vabs.v v8, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vabs.mask(
+ <vscale x 4 x i32> %passthru,
+ <vscale x 4 x i32> %v,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 4 x i32> %res
}
-define <vscale x 8 x i32> @vabs_v_i32m4(<vscale x 8 x i32> %a) {
-; RV32-LABEL: vabs_v_i32m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i32m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i32> @llvm.riscv.vabs(<vscale x 8 x i32> poison, <vscale x 8 x i32> %a, iXLen -1)
+define <vscale x 8 x i32> @vabs_v_i32m4(<vscale x 8 x i32> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i32m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vabs(<vscale x 8 x i32> poison, <vscale x 8 x i32> %v, iXLen %vl)
ret <vscale x 8 x i32> %res
}
-define <vscale x 16 x i32> @vabs_v_i32m8(<vscale x 16 x i32> %a) {
-; RV32-LABEL: vabs_v_i32m8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i32m8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i32> @llvm.riscv.vabs(<vscale x 16 x i32> poison, <vscale x 16 x i32> %a, iXLen -1)
+define <vscale x 8 x i32> @vabs_v_mask_i32m4(<vscale x 8 x i32> %passthru, <vscale x 8 x i32> %v, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i32m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vabs.v v8, v12, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vabs.mask(
+ <vscale x 8 x i32> %passthru,
+ <vscale x 8 x i32> %v,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 16 x i32> @vabs_v_i32m8(<vscale x 16 x i32> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i32m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vabs(<vscale x 16 x i32> poison, <vscale x 16 x i32> %v, iXLen %vl)
+ ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 16 x i32> @vabs_v_mask_i32m8(<vscale x 16 x i32> %passthru, <vscale x 16 x i32> %v, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i32m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vabs.v v8, v16, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vabs.mask(
+ <vscale x 16 x i32> %passthru,
+ <vscale x 16 x i32> %v,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 16 x i32> %res
}
-define <vscale x 1 x i64> @vabs_v_i64m1(<vscale x 1 x i64> %a) {
-; RV32-LABEL: vabs_v_i64m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i64m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i64> @llvm.riscv.vabs(<vscale x 1 x i64> poison, <vscale x 1 x i64> %a, iXLen -1)
+define <vscale x 1 x i64> @vabs_v_i64m1(<vscale x 1 x i64> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i64m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i64> @llvm.riscv.vabs(<vscale x 1 x i64> poison, <vscale x 1 x i64> %v, iXLen %vl)
ret <vscale x 1 x i64> %res
}
-define <vscale x 2 x i64> @vabs_v_i64m2(<vscale x 2 x i64> %a) {
-; RV32-LABEL: vabs_v_i64m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i64m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i64> @llvm.riscv.vabs(<vscale x 2 x i64> poison, <vscale x 2 x i64> %a, iXLen -1)
+define <vscale x 1 x i64> @vabs_v_mask_i64m1(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %v, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i64m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vabs.v v8, v9, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i64> @llvm.riscv.vabs.mask(
+ <vscale x 1 x i64> %passthru,
+ <vscale x 1 x i64> %v,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 1)
+ ret <vscale x 1 x i64> %res
+}
+
+define <vscale x 2 x i64> @vabs_v_i64m2(<vscale x 2 x i64> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i64m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.riscv.vabs(<vscale x 2 x i64> poison, <vscale x 2 x i64> %v, iXLen %vl)
+ ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @vabs_v_mask_i64m2(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %v, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i64m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vabs.v v8, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i64> @llvm.riscv.vabs.mask(
+ <vscale x 2 x i64> %passthru,
+ <vscale x 2 x i64> %v,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 2 x i64> %res
}
-define <vscale x 4 x i64> @vabs_v_i64m4(<vscale x 4 x i64> %a) {
-; RV32-LABEL: vabs_v_i64m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i64m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i64> @llvm.riscv.vabs(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, iXLen -1)
+define <vscale x 4 x i64> @vabs_v_i64m4(<vscale x 4 x i64> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i64m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i64> @llvm.riscv.vabs(<vscale x 4 x i64> poison, <vscale x 4 x i64> %v, iXLen %vl)
+ ret <vscale x 4 x i64> %res
+}
+
+define <vscale x 4 x i64> @vabs_v_mask_i64m4(<vscale x 4 x i64> %passthru, <vscale x 4 x i64> %v, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i64m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vabs.v v8, v12, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i64> @llvm.riscv.vabs.mask(
+ <vscale x 4 x i64> %passthru,
+ <vscale x 4 x i64> %v,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 4 x i64> %res
}
-define <vscale x 8 x i64> @vabs_v_i64m8(<vscale x 8 x i64> %a) {
-; RV32-LABEL: vabs_v_i64m8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV32-NEXT: vabs.v v8, v8
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vabs_v_i64m8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV64-NEXT: vabs.v v8, v8
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i64> @llvm.riscv.vabs(<vscale x 8 x i64> poison, <vscale x 8 x i64> %a, iXLen -1)
+define <vscale x 8 x i64> @vabs_v_i64m8(<vscale x 8 x i64> %v, iXLen %vl) {
+; CHECK-LABEL: vabs_v_i64m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vabs.v v8, v8
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i64> @llvm.riscv.vabs(<vscale x 8 x i64> poison, <vscale x 8 x i64> %v, iXLen %vl)
+ ret <vscale x 8 x i64> %res
+}
+
+define <vscale x 8 x i64> @vabs_v_mask_i64m8(<vscale x 8 x i64> %passthru, <vscale x 8 x i64> %v, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vabs_v_mask_i64m8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vabs.v v8, v16, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i64> @llvm.riscv.vabs.mask(
+ <vscale x 8 x i64> %passthru,
+ <vscale x 8 x i64> %v,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 1)
ret <vscale x 8 x i64> %res
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabda.ll b/llvm/test/CodeGen/RISCV/rvv/vwabda.ll
index 5e8349620a429..99816416a7a14 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwabda.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabda.ll
@@ -1,179 +1,283 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
-
-define <vscale x 1 x i16> @vwabda_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
-; RV32-LABEL: vwabda_vv_i8mf8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
-; RV32-NEXT: vwabda.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i8mf8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
-; RV64-NEXT: vwabda.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i16> @llvm.riscv.vwabda(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i16> @vwabda_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vwabda(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl, iXLen 0)
ret <vscale x 1 x i16> %res
}
-define <vscale x 2 x i16> @vwabda_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
-; RV32-LABEL: vwabda_vv_i8mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
-; RV32-NEXT: vwabda.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i8mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
-; RV64-NEXT: vwabda.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i16> @llvm.riscv.vwabda(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
+define <vscale x 1 x i16> @vwabda_vv_mask_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vwabda.mask(
+ <vscale x 1 x i16> %vd,
+ <vscale x 1 x i8> %a,
+ <vscale x 1 x i8> %b,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 2 x i16> @vwabda_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vwabda(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl, iXLen 0)
ret <vscale x 2 x i16> %res
}
-define <vscale x 4 x i16> @vwabda_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
-; RV32-LABEL: vwabda_vv_i8mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
-; RV32-NEXT: vwabda.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i8mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
-; RV64-NEXT: vwabda.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i16> @llvm.riscv.vwabda(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
+define <vscale x 2 x i16> @vwabda_vv_mask_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vwabda.mask(
+ <vscale x 2 x i16> %vd,
+ <vscale x 2 x i8> %a,
+ <vscale x 2 x i8> %b,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vwabda_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vwabda(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl, iXLen 0)
ret <vscale x 4 x i16> %res
}
-define <vscale x 8 x i16> @vwabda_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
-; RV32-LABEL: vwabda_vv_i8m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, ma
-; RV32-NEXT: vwabda.vv v8, v10, v11
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i8m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, ma
-; RV64-NEXT: vwabda.vv v8, v10, v11
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i16> @llvm.riscv.vwabda(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
+define <vscale x 4 x i16> @vwabda_vv_mask_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vwabda.mask(
+ <vscale x 4 x i16> %vd,
+ <vscale x 4 x i8> %a,
+ <vscale x 4 x i8> %b,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 8 x i16> @vwabda_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v10, v11
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vwabda(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl, iXLen 0)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @vwabda_vv_mask_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v10, v11, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vwabda.mask(
+ <vscale x 8 x i16> %vd,
+ <vscale x 8 x i8> %a,
+ <vscale x 8 x i8> %b,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 0)
ret <vscale x 8 x i16> %res
}
-define <vscale x 16 x i16> @vwabda_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; RV32-LABEL: vwabda_vv_i8m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m2, tu, ma
-; RV32-NEXT: vwabda.vv v8, v12, v14
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i8m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m2, tu, ma
-; RV64-NEXT: vwabda.vv v8, v12, v14
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i16> @llvm.riscv.vwabda(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
+define <vscale x 16 x i16> @vwabda_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v12, v14
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vwabda(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl, iXLen 0)
ret <vscale x 16 x i16> %res
}
-define <vscale x 32 x i16> @vwabda_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
-; RV32-LABEL: vwabda_vv_i8m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m4, tu, ma
-; RV32-NEXT: vwabda.vv v8, v16, v20
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i8m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m4, tu, ma
-; RV64-NEXT: vwabda.vv v8, v16, v20
-; RV64-NEXT: ret
- %res = call <vscale x 32 x i16> @llvm.riscv.vwabda(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
+define <vscale x 16 x i16> @vwabda_vv_mask_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v12, v14, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vwabda.mask(
+ <vscale x 16 x i16> %vd,
+ <vscale x 16 x i8> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 32 x i16> @vwabda_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i8m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v16, v20
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vwabda(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl, iXLen 0)
+ ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x i16> @vwabda_vv_mask_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i8m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v16, v20, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vwabda.mask(
+ <vscale x 32 x i16> %vd,
+ <vscale x 32 x i8> %a,
+ <vscale x 32 x i8> %b,
+ <vscale x 32 x i1> %mask,
+ iXLen %vl, iXLen 0)
ret <vscale x 32 x i16> %res
}
-define <vscale x 1 x i32> @vwabda_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
-; RV32-LABEL: vwabda_vv_i16mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
-; RV32-NEXT: vwabda.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i16mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
-; RV64-NEXT: vwabda.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i32> @llvm.riscv.vwabda(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
+define <vscale x 1 x i32> @vwabda_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vwabda(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 1 x i32> @vwabda_vv_mask_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vwabda.mask(
+ <vscale x 1 x i32> %vd,
+ <vscale x 1 x i16> %a,
+ <vscale x 1 x i16> %b,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 0)
ret <vscale x 1 x i32> %res
}
-define <vscale x 2 x i32> @vwabda_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
-; RV32-LABEL: vwabda_vv_i16mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
-; RV32-NEXT: vwabda.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i16mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
-; RV64-NEXT: vwabda.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i32> @llvm.riscv.vwabda(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
+define <vscale x 2 x i32> @vwabda_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i16mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vwabda(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl, iXLen 0)
ret <vscale x 2 x i32> %res
}
-define <vscale x 4 x i32> @vwabda_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
-; RV32-LABEL: vwabda_vv_i16m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, ma
-; RV32-NEXT: vwabda.vv v8, v10, v11
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i16m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, ma
-; RV64-NEXT: vwabda.vv v8, v10, v11
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i32> @llvm.riscv.vwabda(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
+define <vscale x 2 x i32> @vwabda_vv_mask_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i16mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vwabda.mask(
+ <vscale x 2 x i32> %vd,
+ <vscale x 2 x i16> %a,
+ <vscale x 2 x i16> %b,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 4 x i32> @vwabda_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i16m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v10, v11
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vwabda(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl, iXLen 0)
ret <vscale x 4 x i32> %res
}
-define <vscale x 8 x i32> @vwabda_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; RV32-LABEL: vwabda_vv_i16m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, ma
-; RV32-NEXT: vwabda.vv v8, v12, v14
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i16m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, ma
-; RV64-NEXT: vwabda.vv v8, v12, v14
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i32> @llvm.riscv.vwabda(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
+define <vscale x 4 x i32> @vwabda_vv_mask_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i16m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v10, v11, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vwabda.mask(
+ <vscale x 4 x i32> %vd,
+ <vscale x 4 x i16> %a,
+ <vscale x 4 x i16> %b,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 8 x i32> @vwabda_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i16m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v12, v14
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vwabda(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl, iXLen 0)
+ ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 8 x i32> @vwabda_vv_mask_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i16m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v12, v14, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vwabda.mask(
+ <vscale x 8 x i32> %vd,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 0)
ret <vscale x 8 x i32> %res
}
-define <vscale x 16 x i32> @vwabda_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
-; RV32-LABEL: vwabda_vv_i16m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m4, tu, ma
-; RV32-NEXT: vwabda.vv v8, v16, v20
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabda_vv_i16m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m4, tu, ma
-; RV64-NEXT: vwabda.vv v8, v16, v20
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i32> @llvm.riscv.vwabda(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
+define <vscale x 16 x i32> @vwabda_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_i16m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vwabda.vv v8, v16, v20
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vwabda(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl, iXLen 0)
+ ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 16 x i32> @vwabda_vv_mask_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabda_vv_mask_i16m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vwabda.vv v8, v16, v20, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vwabda.mask(
+ <vscale x 16 x i32> %vd,
+ <vscale x 16 x i16> %a,
+ <vscale x 16 x i16> %b,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 0)
ret <vscale x 16 x i32> %res
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabdau.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdau.ll
index 42c3f0cfa1306..9f8777235dc31 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwabdau.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabdau.ll
@@ -1,179 +1,283 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
-
-define <vscale x 1 x i16> @vwabdau_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
-; RV32-LABEL: vwabdau_vv_i8mf8:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i8mf8:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf8, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i16> @llvm.riscv.vwabdau(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i16> @vwabdau_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vwabdau(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen %vl, iXLen 0)
ret <vscale x 1 x i16> %res
}
-define <vscale x 2 x i16> @vwabdau_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
-; RV32-LABEL: vwabdau_vv_i8mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i8mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf4, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i16> @llvm.riscv.vwabdau(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
+define <vscale x 1 x i16> @vwabdau_vv_mask_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8mf8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i16> @llvm.riscv.vwabdau.mask(
+ <vscale x 1 x i16> %vd,
+ <vscale x 1 x i8> %a,
+ <vscale x 1 x i8> %b,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i16> %res
+}
+
+define <vscale x 2 x i16> @vwabdau_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vwabdau(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen %vl, iXLen 0)
ret <vscale x 2 x i16> %res
}
-define <vscale x 4 x i16> @vwabdau_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
-; RV32-LABEL: vwabdau_vv_i8mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i8mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, mf2, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i16> @llvm.riscv.vwabdau(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
+define <vscale x 2 x i16> @vwabdau_vv_mask_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i16> @llvm.riscv.vwabdau.mask(
+ <vscale x 2 x i16> %vd,
+ <vscale x 2 x i8> %a,
+ <vscale x 2 x i8> %b,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 2 x i16> %res
+}
+
+define <vscale x 4 x i16> @vwabdau_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vwabdau(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen %vl, iXLen 0)
ret <vscale x 4 x i16> %res
}
-define <vscale x 8 x i16> @vwabdau_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
-; RV32-LABEL: vwabdau_vv_i8m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m1, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v10, v11
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i8m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m1, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v10, v11
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i16> @llvm.riscv.vwabdau(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
+define <vscale x 4 x i16> @vwabdau_vv_mask_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i16> @llvm.riscv.vwabdau.mask(
+ <vscale x 4 x i16> %vd,
+ <vscale x 4 x i8> %a,
+ <vscale x 4 x i8> %b,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 4 x i16> %res
+}
+
+define <vscale x 8 x i16> @vwabdau_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v10, v11
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vwabdau(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen %vl, iXLen 0)
+ ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @vwabdau_vv_mask_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v10, v11, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i16> @llvm.riscv.vwabdau.mask(
+ <vscale x 8 x i16> %vd,
+ <vscale x 8 x i8> %a,
+ <vscale x 8 x i8> %b,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 0)
ret <vscale x 8 x i16> %res
}
-define <vscale x 16 x i16> @vwabdau_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
-; RV32-LABEL: vwabdau_vv_i8m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m2, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v12, v14
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i8m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m2, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v12, v14
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i16> @llvm.riscv.vwabdau(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
+define <vscale x 16 x i16> @vwabdau_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v12, v14
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vwabdau(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen %vl, iXLen 0)
ret <vscale x 16 x i16> %res
}
-define <vscale x 32 x i16> @vwabdau_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
-; RV32-LABEL: vwabdau_vv_i8m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e8, m4, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v16, v20
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i8m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e8, m4, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v16, v20
-; RV64-NEXT: ret
- %res = call <vscale x 32 x i16> @llvm.riscv.vwabdau(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
+define <vscale x 16 x i16> @vwabdau_vv_mask_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v12, v14, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i16> @llvm.riscv.vwabdau.mask(
+ <vscale x 16 x i16> %vd,
+ <vscale x 16 x i8> %a,
+ <vscale x 16 x i8> %b,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 16 x i16> %res
+}
+
+define <vscale x 32 x i16> @vwabdau_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i8m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v16, v20
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vwabdau(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen %vl, iXLen 0)
+ ret <vscale x 32 x i16> %res
+}
+
+define <vscale x 32 x i16> @vwabdau_vv_mask_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i8m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v16, v20, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i16> @llvm.riscv.vwabdau.mask(
+ <vscale x 32 x i16> %vd,
+ <vscale x 32 x i8> %a,
+ <vscale x 32 x i8> %b,
+ <vscale x 32 x i1> %mask,
+ iXLen %vl, iXLen 0)
ret <vscale x 32 x i16> %res
}
-define <vscale x 1 x i32> @vwabdau_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
-; RV32-LABEL: vwabdau_vv_i16mf4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i16mf4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 1 x i32> @llvm.riscv.vwabdau(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
+define <vscale x 1 x i32> @vwabdau_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vwabdau(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen %vl, iXLen 0)
+ ret <vscale x 1 x i32> %res
+}
+
+define <vscale x 1 x i32> @vwabdau_vv_mask_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i16mf4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i32> @llvm.riscv.vwabdau.mask(
+ <vscale x 1 x i32> %vd,
+ <vscale x 1 x i16> %a,
+ <vscale x 1 x i16> %b,
+ <vscale x 1 x i1> %mask,
+ iXLen %vl, iXLen 0)
ret <vscale x 1 x i32> %res
}
-define <vscale x 2 x i32> @vwabdau_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
-; RV32-LABEL: vwabdau_vv_i16mf2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v9, v10
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i16mf2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v9, v10
-; RV64-NEXT: ret
- %res = call <vscale x 2 x i32> @llvm.riscv.vwabdau(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
+define <vscale x 2 x i32> @vwabdau_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i16mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v9, v10
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vwabdau(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen %vl, iXLen 0)
ret <vscale x 2 x i32> %res
}
-define <vscale x 4 x i32> @vwabdau_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
-; RV32-LABEL: vwabdau_vv_i16m1:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m1, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v10, v11
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i16m1:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m1, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v10, v11
-; RV64-NEXT: ret
- %res = call <vscale x 4 x i32> @llvm.riscv.vwabdau(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
+define <vscale x 2 x i32> @vwabdau_vv_mask_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i16mf2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v9, v10, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i32> @llvm.riscv.vwabdau.mask(
+ <vscale x 2 x i32> %vd,
+ <vscale x 2 x i16> %a,
+ <vscale x 2 x i16> %b,
+ <vscale x 2 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 2 x i32> %res
+}
+
+define <vscale x 4 x i32> @vwabdau_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i16m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v10, v11
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vwabdau(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen %vl, iXLen 0)
ret <vscale x 4 x i32> %res
}
-define <vscale x 8 x i32> @vwabdau_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
-; RV32-LABEL: vwabdau_vv_i16m2:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m2, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v12, v14
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i16m2:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m2, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v12, v14
-; RV64-NEXT: ret
- %res = call <vscale x 8 x i32> @llvm.riscv.vwabdau(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
+define <vscale x 4 x i32> @vwabdau_vv_mask_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i16m1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v10, v11, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i32> @llvm.riscv.vwabdau.mask(
+ <vscale x 4 x i32> %vd,
+ <vscale x 4 x i16> %a,
+ <vscale x 4 x i16> %b,
+ <vscale x 4 x i1> %mask,
+ iXLen %vl, iXLen 0)
+ ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 8 x i32> @vwabdau_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i16m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v12, v14
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vwabdau(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen %vl, iXLen 0)
+ ret <vscale x 8 x i32> %res
+}
+
+define <vscale x 8 x i32> @vwabdau_vv_mask_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i16m2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v12, v14, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i32> @llvm.riscv.vwabdau.mask(
+ <vscale x 8 x i32> %vd,
+ <vscale x 8 x i16> %a,
+ <vscale x 8 x i16> %b,
+ <vscale x 8 x i1> %mask,
+ iXLen %vl, iXLen 0)
ret <vscale x 8 x i32> %res
}
-define <vscale x 16 x i32> @vwabdau_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
-; RV32-LABEL: vwabdau_vv_i16m4:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a0, zero, e16, m4, tu, ma
-; RV32-NEXT: vwabdau.vv v8, v16, v20
-; RV32-NEXT: ret
-;
-; RV64-LABEL: vwabdau_vv_i16m4:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a0, zero, e16, m4, tu, ma
-; RV64-NEXT: vwabdau.vv v8, v16, v20
-; RV64-NEXT: ret
- %res = call <vscale x 16 x i32> @llvm.riscv.vwabdau(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
+define <vscale x 16 x i32> @vwabdau_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_i16m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT: vwabdau.vv v8, v16, v20
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vwabdau(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen %vl, iXLen 0)
+ ret <vscale x 16 x i32> %res
+}
+
+define <vscale x 16 x i32> @vwabdau_vv_mask_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i1> %mask, iXLen %vl) {
+; CHECK-LABEL: vwabdau_vv_mask_i16m4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vwabdau.vv v8, v16, v20, v0.t
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i32> @llvm.riscv.vwabdau.mask(
+ <vscale x 16 x i32> %vd,
+ <vscale x 16 x i16> %a,
+ <vscale x 16 x i16> %b,
+ <vscale x 16 x i1> %mask,
+ iXLen %vl, iXLen 0)
ret <vscale x 16 x i32> %res
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}