[clang] [llvm] [RISCV] Support for Zvabd fast-track proposal (PR #124239)

Pengcheng Wang via cfe-commits cfe-commits at lists.llvm.org
Fri Feb 6 02:53:22 PST 2026


https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/124239

From 80a4b97e3e0f7e0b2172518c718f87b714802640 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Tue, 16 Jul 2024 16:08:16 +0800
Subject: [PATCH 01/10] [RISCV][MC] Support experimental Zvabd instructions

`Zvabd` is the `RISC-V Integer Vector Absolute Difference` extension. It
provides five instructions (a rough semantics sketch follows the list):

* `vabs.v`: Vector Signed Integer Absolute.
* `vabd.vv`: Vector Signed Integer Absolute Difference.
* `vabdu.vv`: Vector Unsigned Integer Absolute Difference.
* `vwabda.vv`: Vector Signed Integer Absolute Difference And Accumulate.
* `vwabdau.vv`: Vector Unsigned Integer Absolute Difference And Accumulate.
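
A rough, non-normative scalar C++ model of the per-element semantics
(SEW=8 is an assumption for illustration; the widening forms accumulate
into a 2*SEW destination element; the spec linked below is authoritative):

  #include <cstdint>

  int8_t   vabs_v(int8_t x)               { return x < 0 ? -x : x; }
  uint8_t  vabd_vv(int8_t a, int8_t b)    { return a > b ? a - b : b - a; }
  uint8_t  vabdu_vv(uint8_t a, uint8_t b) { return a > b ? a - b : b - a; }
  // Widening accumulate forms: vd is a 2*SEW destination element.
  int16_t  vwabda_vv(int16_t vd, int8_t a, int8_t b)     { return vd + vabd_vv(a, b); }
  uint16_t vwabdau_vv(uint16_t vd, uint8_t a, uint8_t b) { return vd + vabdu_vv(a, b); }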

Doc: https://github.com/riscv/integer-vector-absolute-difference

Pull Request: https://github.com/llvm/llvm-project/pull/180139
---
 .../Driver/print-supported-extensions-riscv.c |  1 +
 .../test/Preprocessor/riscv-target-features.c |  9 +++
 llvm/docs/RISCVUsage.rst                      |  3 +
 llvm/docs/ReleaseNotes.md                     |  2 +
 llvm/lib/Target/RISCV/RISCVFeatures.td        |  6 ++
 llvm/lib/Target/RISCV/RISCVInstrInfo.td       |  1 +
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td  | 27 ++++++++
 llvm/test/CodeGen/RISCV/attributes.ll         |  4 ++
 llvm/test/CodeGen/RISCV/features-info.ll      |  1 +
 llvm/test/MC/RISCV/rvv/zvabd-invalid.s        | 10 +++
 llvm/test/MC/RISCV/rvv/zvabd.s                | 63 +++++++++++++++++++
 .../TargetParser/RISCVISAInfoTest.cpp         |  1 +
 12 files changed, 128 insertions(+)
 create mode 100644 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
 create mode 100644 llvm/test/MC/RISCV/rvv/zvabd-invalid.s
 create mode 100644 llvm/test/MC/RISCV/rvv/zvabd.s

diff --git a/clang/test/Driver/print-supported-extensions-riscv.c b/clang/test/Driver/print-supported-extensions-riscv.c
index 8337d9f12fabd..3abafb6deafb2 100644
--- a/clang/test/Driver/print-supported-extensions-riscv.c
+++ b/clang/test/Driver/print-supported-extensions-riscv.c
@@ -245,6 +245,7 @@
 // CHECK-NEXT:     zibi                 0.1       'Zibi' (Branch with Immediate)
 // CHECK-NEXT:     zicfilp              1.0       'Zicfilp' (Landing pad)
 // CHECK-NEXT:     zicfiss              1.0       'Zicfiss' (Shadow stack)
+// CHECK-NEXT:     zvabd                0.7       'Zvabd' (Vector Absolute Difference)
 // CHECK-NEXT:     zvbc32e              0.7       'Zvbc32e' (Vector Carryless Multiplication with 32-bits elements)
 // CHECK-NEXT:     zvfbfa               0.1       'Zvfbfa' (Additional BF16 vector compute support)
 // CHECK-NEXT:     zvfofp8min           0.2       'Zvfofp8min' (Vector OFP8 Converts)
diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index e315f75b15614..833a64d23c4e0 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -140,6 +140,7 @@
 // CHECK-NOT: __riscv_zkt {{.*$}}
 // CHECK-NOT: __riscv_zmmul {{.*$}}
 // CHECK-NOT: __riscv_ztso {{.*$}}
+// CHECK-NOT: __riscv_zvabd {{.*$}}
 // CHECK-NOT: __riscv_zvbb {{.*$}}
 // CHECK-NOT: __riscv_zvbc {{.*$}}
 // CHECK-NOT: __riscv_zve32f {{.*$}}
@@ -1382,6 +1383,14 @@
 // RUN:   -o - | FileCheck --check-prefix=CHECK-ZFA-EXT %s
 // CHECK-ZFA-EXT: __riscv_zfa 1000000{{$}}
 
+// RUN: %clang --target=riscv32 -menable-experimental-extensions \
+// RUN:   -march=rv32i_zve64x_zvabd0p7 -E -dM %s \
+// RUN:   -o - | FileCheck --check-prefix=CHECK-ZVABD-EXT %s
+// RUN: %clang --target=riscv64 -menable-experimental-extensions \
+// RUN:   -march=rv64i_zve64x_zvabd0p7 -E -dM %s \
+// RUN:   -o - | FileCheck --check-prefix=CHECK-ZVABD-EXT %s
+// CHECK-ZVABD-EXT: __riscv_zvabd  7000{{$}}
+
 // RUN: %clang --target=riscv32 \
 // RUN:   -march=rv32i_zve64x_zvbb1p0 -E -dM %s \
 // RUN:   -o - | FileCheck --check-prefix=CHECK-ZVBB-EXT %s
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index b58ecc105620a..6fd52df010991 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -356,6 +356,9 @@ The primary goal of experimental support is to assist in the process of ratifica
 ``experimental-smpmpmt``
   LLVM implements the `0.6 draft specification <https://github.com/riscv/riscv-isa-manual/blob/smpmpmt/src/smpmpmt.adoc>`__.
 
+``experimental-zvabd``
+  LLVM implements the `0.7 draft specification <https://github.com/riscv/integer-vector-absolute-difference/releases/tag/v0.7>`__.
+
 To use an experimental extension from `clang`, you must add `-menable-experimental-extensions` to the command line, and specify the exact version of the experimental extension you are using.  To use an experimental extension with LLVM's internal developer tools (e.g. `llc`, `llvm-objdump`, `llvm-mc`), you must prefix the extension name with `experimental-`.  Note that you don't need to specify the version with internal tools, and shouldn't include the `experimental-` prefix with `clang`.
 
 Vendor Extensions
diff --git a/llvm/docs/ReleaseNotes.md b/llvm/docs/ReleaseNotes.md
index 2a535dc0530a0..f9be659ed9bd4 100644
--- a/llvm/docs/ReleaseNotes.md
+++ b/llvm/docs/ReleaseNotes.md
@@ -129,6 +129,8 @@ Changes to the RISC-V Backend
   `sspush`, `sspopchk`, `ssrdp`, `c.sspush`, `c.sspopchk`) are now always
   available in the assembler and disassembler without requiring their respective
   extensions.
+* Adds experimental assembler support for the `Zvabd` (RISC-V Integer Vector
+  Absolute Difference) extension.
 
 Changes to the WebAssembly Backend
 ----------------------------------
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index e8c8543992865..de7f14802002e 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -688,6 +688,12 @@ def FeatureStdExtV
                      [FeatureStdExtZvl128b, FeatureStdExtZve64d]>,
       RISCVExtensionBitmask<0, 21>;
 
+def FeatureStdExtZvabd
+    : RISCVExperimentalExtension<0, 7, "Vector Absolute Difference">;
+def HasStdExtZvabd : Predicate<"Subtarget->hasStdExtZvabd()">,
+                     AssemblerPredicate<(all_of FeatureStdExtZvabd),
+                                        "'Zvabd' (Vector Absolute Difference)">;
+
 def FeatureStdExtZvfbfa
     : RISCVExperimentalExtension<0, 1, "Additional BF16 vector compute support",
                                  [FeatureStdExtZve32f, FeatureStdExtZfbfmin]>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 156e41ede2d1e..699a1b0bf3cd3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -2353,6 +2353,7 @@ include "RISCVInstrInfoZk.td"
 
 // Vector
 include "RISCVInstrInfoV.td"
+include "RISCVInstrInfoZvabd.td"
 include "RISCVInstrInfoZvk.td"
 include "RISCVInstrInfoZvqdotq.td"
 include "RISCVInstrInfoZvfofp8min.td"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
new file mode 100644
index 0000000000000..b8768f52af399
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -0,0 +1,27 @@
+//===-- RISCVInstrInfoZvabd.td - 'Zvabd' instructions ------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file describes the RISC-V instructions for 'Zvabd' (Vector Absolute
+/// Difference).
+///
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction Definitions
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtZvabd] in {
+  defm VABS_V : VALU_MV_VS2<"vabs.v", 0b010010, 0b10000>;
+
+  def VABD_VV  : VALUVV<0b010001, OPMVV, "vabd.vv">;
+  def VABDU_VV : VALUVV<0b010011, OPMVV, "vabdu.vv">;
+
+  let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
+    def VWABDA_VV  : VALUVV<0b010101, OPMVV, "vwabda.vv">;
+    def VWABDAU_VV : VALUVV<0b010110, OPMVV, "vwabdau.vv">;
+  } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
+} // Predicates = [HasStdExtZvabd]
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index 522dc3579deb1..a17e199eed994 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -111,6 +111,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+zvkt %s -o - | FileCheck --check-prefix=RV32ZVKT %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+experimental-zvqdotq %s -o - | FileCheck --check-prefix=RV32ZVQDOTQ %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zvfh %s -o - | FileCheck --check-prefix=RV32ZVFH %s
+; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+experimental-zvabd %s -o - | FileCheck --check-prefix=RV32ZVABD %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zicond %s -o - | FileCheck --check-prefix=RV32ZICOND %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zilsd %s -o - | FileCheck --check-prefix=RV32ZILSD %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zimop %s -o - | FileCheck --check-prefix=RV32ZIMOP %s
@@ -265,6 +266,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+zvkt %s -o - | FileCheck --check-prefix=RV64ZVKT %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+experimental-zvqdotq %s -o - | FileCheck --check-prefix=RV64ZVQDOTQ %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zvfh %s -o - | FileCheck --check-prefix=RV64ZVFH %s
+; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+experimental-zvabd %s -o - | FileCheck --check-prefix=RV64ZVABD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zicond %s -o - | FileCheck --check-prefix=RV64ZICOND %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zimop %s -o - | FileCheck --check-prefix=RV64ZIMOP %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zcmop %s -o - | FileCheck --check-prefix=RV64ZCMOP %s
@@ -430,6 +432,7 @@
 ; RV32ZVKT: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvkt1p0_zvl32b1p0"
 ; RV32ZVQDOTQ: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvl32b1p0_zvqdotq0p0"
 ; RV32ZVFH: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zfhmin1p0_zve32f1p0_zve32x1p0_zvfh1p0_zvfhmin1p0_zvl32b1p0"
+; RV32ZVABD: .attribute 5, "rv32i2p1_zicsr2p0_zvabd0p7_zve32x1p0_zvl32b1p0"
 ; RV32ZICOND: .attribute 5, "rv32i2p1_zicond1p0"
 ; RV32ZILSD: .attribute 5, "rv32i2p1_zilsd1p0"
 ; RV32ZIMOP: .attribute 5, "rv32i2p1_zimop1p0"
@@ -582,6 +585,7 @@
 ; RV64ZVKT: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvkt1p0_zvl32b1p0"
 ; RV64ZVQDOTQ: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvl32b1p0_zvqdotq0p0"
 ; RV64ZVFH: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zfhmin1p0_zve32f1p0_zve32x1p0_zvfh1p0_zvfhmin1p0_zvl32b1p0"
+; RV64ZVABD: .attribute 5, "rv64i2p1_zicsr2p0_zvabd0p7_zve32x1p0_zvl32b1p0"
 ; RV64ZICOND: .attribute 5, "rv64i2p1_zicond1p0"
 ; RV64ZIMOP: .attribute 5, "rv64i2p1_zimop1p0"
 ; RV64ZCMOP: .attribute 5, "rv64i2p1_c2p0_zca1p0_zcmop1p0"
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index 5eadff68895de..c2e56061bf579 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -39,6 +39,7 @@
 ; CHECK-NEXT:   experimental-zibi                - 'Zibi' (Branch with Immediate).
 ; CHECK-NEXT:   experimental-zicfilp             - 'Zicfilp' (Landing pad).
 ; CHECK-NEXT:   experimental-zicfiss             - 'Zicfiss' (Shadow stack).
+; CHECK-NEXT:   experimental-zvabd               - 'Zvabd' (Vector Absolute Difference).
 ; CHECK-NEXT:   experimental-zvbc32e             - 'Zvbc32e' (Vector Carryless Multiplication with 32-bits elements).
 ; CHECK-NEXT:   experimental-zvfbfa              - 'Zvfbfa' (Additional BF16 vector compute support).
 ; CHECK-NEXT:   experimental-zvfofp8min          - 'Zvfofp8min' (Vector OFP8 Converts).
diff --git a/llvm/test/MC/RISCV/rvv/zvabd-invalid.s b/llvm/test/MC/RISCV/rvv/zvabd-invalid.s
new file mode 100644
index 0000000000000..ec4529b9289cb
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvabd-invalid.s
@@ -0,0 +1,10 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+experimental-zvabd %s 2>&1 \
+# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
+
+vwabda.vv v9, v9, v8
+# CHECK-ERROR: [[@LINE-1]]:11: error: the destination vector register group cannot overlap the source vector register group
+# CHECK-ERROR-LABEL: vwabda.vv v9, v9, v8
+
+vwabdau.vv v9, v9, v8
+# CHECK-ERROR: [[@LINE-1]]:12: error: the destination vector register group cannot overlap the source vector register group
+# CHECK-ERROR-LABEL: vwabdau.vv v9, v9, v8
diff --git a/llvm/test/MC/RISCV/rvv/zvabd.s b/llvm/test/MC/RISCV/rvv/zvabd.s
new file mode 100644
index 0000000000000..2b994ebf94ba6
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvabd.s
@@ -0,0 +1,63 @@
+# RUN: llvm-mc -triple=riscv32 -show-encoding --mattr=+v --mattr=+experimental-zvabd %s \
+# RUN:        | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: not llvm-mc -triple=riscv32 -show-encoding %s 2>&1 \
+# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv32 -filetype=obj --mattr=+v --mattr=+experimental-zvabd %s \
+# RUN:        | llvm-objdump -d --mattr=+v --mattr=+experimental-zvabd --no-print-imm-hex  - \
+# RUN:        | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv32 -filetype=obj --mattr=+v --mattr=+experimental-zvabd %s \
+# RUN:        | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+
+vabs.v v9, v8
+# CHECK-INST: vabs.v v9, v8
+# CHECK-ENCODING: [0xd7,0x24,0x88,0x4a]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 4a8824d7 <unknown>
+
+vabd.vv v10, v9, v8
+# CHECK-INST: vabd.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 46942557 <unknown>
+
+vabd.vv v10, v9, v8, v0.t
+# CHECK-INST: vabd.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 44942557 <unknown>
+
+vabdu.vv v10, v9, v8
+# CHECK-INST: vabdu.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 4e942557 <unknown>
+
+vabdu.vv v10, v9, v8, v0.t
+# CHECK-INST: vabdu.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 4c942557 <unknown>
+
+vwabda.vv v10, v9, v8
+# CHECK-INST: vwabda.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x56]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 56942557 <unknown>
+
+vwabda.vv v10, v9, v8, v0.t
+# CHECK-INST: vwabda.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x54]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 54942557 <unknown>
+
+vwabdau.vv v10, v9, v8
+# CHECK-INST: vwabdau.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x5a]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 5a942557 <unknown>
+
+vwabdau.vv v10, v9, v8, v0.t
+# CHECK-INST: vwabdau.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 58942557 <unknown>
diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
index c07551e6cff00..fa09135136889 100644
--- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
@@ -1397,6 +1397,7 @@ Experimental extensions
     zibi                 0.1
     zicfilp              1.0       This is a long dummy description
     zicfiss              1.0
+    zvabd                0.7
     zvbc32e              0.7
     zvfbfa               0.1
     zvfofp8min           0.2

From 86f29a59d3e58c237861f50c26bf48f944ab36f1 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Fri, 24 Jan 2025 15:55:53 +0800
Subject: [PATCH 02/10] [RISCV][CodeGen] Lowering abds/abdu to Zvabd
 instructions

We directly lower `ISD::ABDS`/`ISD::ABDU` to `Zvabd` instructions.

Note that `vabd.vv`/`vabdu.vv` are only defined for SEW=8/16; for other
element widths we keep the existing min/max/sub expansion.
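
As a rough, non-normative illustration of the two paths (a scalar
stand-in for a single lane, not the actual SelectionDAG code):

  #include <algorithm>
  #include <cstdint>

  // With Zvabd and SEW=8/16, abds/abdu map directly to vabd.vv/vabdu.vv.
  // Otherwise the existing expansion is kept: max(a, b) - min(a, b), which
  // shows up as the vmin/vmax/vsub (or vminu/vmaxu/vsub) sequences in the
  // tests below.
  int32_t  abds_expand(int32_t a, int32_t b)   { return std::max(a, b) - std::min(a, b); }
  uint32_t abdu_expand(uint32_t a, uint32_t b) { return std::max(a, b) - std::min(a, b); }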

Pull Request: https://github.com/llvm/llvm-project/pull/180141
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  20 +-
 .../Target/RISCV/RISCVInstrInfoVPseudos.td    |  19 +-
 .../Target/RISCV/RISCVInstrInfoVSDPatterns.td |  29 +-
 .../Target/RISCV/RISCVInstrInfoVVLPatterns.td |  32 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td  |  29 ++
 llvm/test/CodeGen/RISCV/rvv/abd.ll            | 156 +++++++++
 .../CodeGen/RISCV/rvv/fixed-vectors-abd.ll    | 312 ++++++++++++++++++
 .../CodeGen/RISCV/rvv/fixed-vectors-sad.ll    |  83 +++++
 8 files changed, 650 insertions(+), 30 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b9d8ffc19e51a..387dc2ba5f388 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -995,7 +995,15 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
                          Legal);
 
-      setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
+      if (Subtarget.hasStdExtZvabd()) {
+        // Only SEW=8/16 are supported in Zvabd.
+        if (VT.getVectorElementType() == MVT::i8 ||
+            VT.getVectorElementType() == MVT::i16)
+          setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Legal);
+        else
+          setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
+      } else
+        setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
 
       // Custom-lower extensions and truncations from/to mask types.
       setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
@@ -7526,6 +7534,8 @@ static unsigned getRISCVVLOp(SDValue Op) {
   OP_CASE(SMAX)
   OP_CASE(UMIN)
   OP_CASE(UMAX)
+  OP_CASE(ABDS)
+  OP_CASE(ABDU)
   OP_CASE(STRICT_FADD)
   OP_CASE(STRICT_FSUB)
   OP_CASE(STRICT_FMUL)
@@ -8814,8 +8824,14 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return lowerToScalableOp(Op, DAG);
   case ISD::ABDS:
   case ISD::ABDU: {
-    SDLoc dl(Op);
     EVT VT = Op->getValueType(0);
+    // Only SEW=8/16 are supported in Zvabd.
+    if (Subtarget.hasStdExtZvabd() && VT.isVector() &&
+        (VT.getVectorElementType() == MVT::i8 ||
+         VT.getVectorElementType() == MVT::i16))
+      return lowerToScalableOp(Op, DAG);
+
+    SDLoc dl(Op);
     SDValue LHS = DAG.getFreeze(Op->getOperand(0));
     SDValue RHS = DAG.getFreeze(Op->getOperand(1));
     bool IsSigned = Op->getOpcode() == ISD::ABDS;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4e2b162507f52..fe7dc2a21bd7f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2817,14 +2817,19 @@ multiclass VPseudoVFRDIV_VF_RM {
   }
 }
 
-multiclass VPseudoVALU_VV_VX {
- foreach m = MxList in {
-    defm "" : VPseudoBinaryV_VV<m>,
-            SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
-                        forcePassthruRead=true>;
+multiclass VPseudoVALU_VV<bit Commutable = 0> {
+  foreach m = MxList in {
+    defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
+              SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
+                          forcePassthruRead=true>;
+  }
+}
+
+multiclass VPseudoVALU_VV_VX<bit Commutable = 0> : VPseudoVALU_VV<Commutable> {
+  foreach m = MxList in {
     defm "" : VPseudoBinaryV_VX<m>,
-            SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
-                        forcePassthruRead=true>;
+              SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
+                          forcePassthruRead=true>;
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 38d0cf16a2da0..a469d7a04ec36 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -123,20 +123,29 @@ class VPatBinarySDNode_XI<SDPatternOperator vop,
                      xop_kind:$rs2,
                      avl, log2sew, TA_MA)>;
 
+multiclass VPatBinarySDNode_VV<SDPatternOperator vop, string instruction_name,
+                               list<VTypeInfo> vtilist = AllIntegerVectors,
+                               bit isSEWAware = 0,
+                               list<Predicate> ExtraPreds = []> {
+  foreach vti = vtilist in {
+    let Predicates = !listconcat(ExtraPreds, GetVTypePredicates<vti>.Predicates) in
+    def : VPatBinarySDNode_VV<vop, instruction_name,
+                              vti.Vector, vti.Vector, vti.Log2SEW,
+                              vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
+  }
+}
+
 multiclass VPatBinarySDNode_VV_VX<SDPatternOperator vop, string instruction_name,
                                   list<VTypeInfo> vtilist = AllIntegerVectors,
                                   bit isSEWAware = 0,
-                                  list<Predicate> ExtraPreds = []> {
+                                  list<Predicate> ExtraPreds = []>
+    : VPatBinarySDNode_VV<vop, instruction_name, vtilist, isSEWAware, ExtraPreds> {
   foreach vti = vtilist in {
-    let Predicates = !listconcat(ExtraPreds, GetVTypePredicates<vti>.Predicates) in {
-      def : VPatBinarySDNode_VV<vop, instruction_name,
-                                vti.Vector, vti.Vector, vti.Log2SEW,
-                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
-      def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
-                                vti.Vector, vti.Vector, vti.Log2SEW,
-                                vti.LMul, vti.AVL, vti.RegClass,
-                                SplatPat, GPR, isSEWAware>;
-    }
+    let Predicates = !listconcat(ExtraPreds, GetVTypePredicates<vti>.Predicates) in
+    def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
+                              vti.Vector, vti.Vector, vti.Log2SEW,
+                              vti.LMul, vti.AVL, vti.RegClass,
+                              SplatPat, GPR, isSEWAware>;
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 51c7175a07474..46b1cefcf6dc0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -910,21 +910,31 @@ class VPatBinaryVL_XI<SDPatternOperator vop,
                    xop_kind:$rs2,
                    (mask_type VMV0:$vm), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
 
+multiclass VPatBinaryVL_VV<SDPatternOperator vop, string instruction_name,
+                           list<VTypeInfo> vtilist = AllIntegerVectors,
+                           bit isSEWAware = 0,
+                           list<Predicate> ExtraPreds = []> {
+  foreach vti = vtilist in {
+    let Predicates = !listconcat(ExtraPreds, GetVTypePredicates<vti>.Predicates) in {
+    def : VPatBinaryVL_V<vop, instruction_name, "VV",
+                         vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                         vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                         vti.RegClass, isSEWAware>;
+    }
+  }
+}
+
 multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name,
                               list<VTypeInfo> vtilist = AllIntegerVectors,
                               bit isSEWAware = 0,
-                              list<Predicate> ExtraPreds = []> {
+                              list<Predicate> ExtraPreds = []> 
+    : VPatBinaryVL_VV<vop, instruction_name, vtilist, isSEWAware, ExtraPreds> {
   foreach vti = vtilist in {
-    let Predicates = !listconcat(ExtraPreds, GetVTypePredicates<vti>.Predicates) in {
-      def : VPatBinaryVL_V<vop, instruction_name, "VV",
-                           vti.Vector, vti.Vector, vti.Vector, vti.Mask,
-                           vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
-                           vti.RegClass, isSEWAware>;
-      def : VPatBinaryVL_XI<vop, instruction_name, "VX",
-                            vti.Vector, vti.Vector, vti.Vector, vti.Mask,
-                            vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
-                            SplatPat, GPR, isSEWAware>;
-    }
+    let Predicates = !listconcat(ExtraPreds, GetVTypePredicates<vti>.Predicates) in
+    def : VPatBinaryVL_XI<vop, instruction_name, "VX",
+                          vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                          vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                          SplatPat, GPR, isSEWAware>;
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index b8768f52af399..fa7b188fc7325 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -25,3 +25,32 @@ let Predicates = [HasStdExtZvabd] in {
     def VWABDAU_VV : VALUVV<0b010110, OPMVV, "vwabdau.vv">;
   } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
 } // Predicates = [HasStdExtZvabd]
+
+//===----------------------------------------------------------------------===//
+// Pseudos
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtZvabd] in {
+  defm PseudoVABD : VPseudoVALU_VV<Commutable = 1>;
+  defm PseudoVABDU : VPseudoVALU_VV<Commutable = 1>;
+} // Predicates = [HasStdExtZvabd]
+
+//===----------------------------------------------------------------------===//
+// CodeGen Patterns
+//===----------------------------------------------------------------------===//
+let HasPassthruOp = true, HasMaskOp = true in {
+def riscv_abds_vl : RVSDNode<"ABDS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def riscv_abdu_vl : RVSDNode<"ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+} // let HasPassthruOp = true, HasMaskOp = true
+
+// These instructions are only defined for SEW=8 and SEW=16; for other SEW
+// values the instruction encoding is reserved.
+defvar ABDIntVectors = !filter(vti, AllIntegerVectors, !or(!eq(vti.SEW, 8),
+                                                           !eq(vti.SEW, 16)));
+
+let Predicates = [HasStdExtZvabd] in {
+defm : VPatBinarySDNode_VV<abds, "PseudoVABD", ABDIntVectors>;
+defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU", ABDIntVectors>;
+
+defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD", ABDIntVectors>;
+defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU", ABDIntVectors>;
+} // Predicates = [HasStdExtZvabd]
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
index 949a9a3dfc470..c451559a29a69 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV64
 
 ;
 ; SABD
@@ -14,6 +16,12 @@ define <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
   %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
   %sub = sub <vscale x 16 x i16> %a.sext, %b.sext
@@ -30,6 +38,14 @@ define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_b_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT:    vmxor.mm v0, v0, v8
+; ZVABD-NEXT:    vmv.v.i v8, 0
+; ZVABD-NEXT:    vmerge.vim v8, v8, 1, v0
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
   %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
   %sub = sub <vscale x 16 x i8> %a.sext, %b.sext
@@ -45,6 +61,12 @@ define <vscale x 8 x i16> @sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
   %b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
   %sub = sub <vscale x 8 x i32> %a.sext, %b.sext
@@ -63,6 +85,14 @@ define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v10, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
   %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
   %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
@@ -78,6 +108,14 @@ define <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vmin.vv v12, v8, v10
+; ZVABD-NEXT:    vmax.vv v8, v8, v10
+; ZVABD-NEXT:    vsub.vv v8, v8, v12
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
   %sub = sub <vscale x 4 x i64> %a.sext, %b.sext
@@ -96,6 +134,14 @@ define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v10, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
   %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
   %sub = sub <vscale x 4 x i32> %a.sext, %b.sext
@@ -111,6 +157,14 @@ define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_d:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vmin.vv v12, v8, v10
+; ZVABD-NEXT:    vmax.vv v8, v8, v10
+; ZVABD-NEXT:    vsub.vv v8, v8, v12
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
   %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
   %sub = sub <vscale x 2 x i128> %a.sext, %b.sext
@@ -129,6 +183,16 @@ define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_d_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v10, v8, v10
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
   %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
   %sub = sub <vscale x 2 x i64> %a.sext, %b.sext
@@ -148,6 +212,12 @@ define <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
   %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
   %sub = sub <vscale x 16 x i16> %a.zext, %b.zext
@@ -164,6 +234,14 @@ define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_b_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT:    vmxor.mm v0, v0, v8
+; ZVABD-NEXT:    vmv.v.i v8, 0
+; ZVABD-NEXT:    vmerge.vim v8, v8, 1, v0
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
   %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
   %sub = sub <vscale x 16 x i8> %a.zext, %b.zext
@@ -179,6 +257,12 @@ define <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
   %b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
   %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
@@ -197,6 +281,14 @@ define <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v10, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
   %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
   %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
@@ -212,6 +304,14 @@ define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vminu.vv v12, v8, v10
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v10
+; ZVABD-NEXT:    vsub.vv v8, v8, v12
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
   %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
@@ -230,6 +330,14 @@ define <vscale x 4 x i32> @uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v10, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
   %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
   %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
@@ -245,6 +353,14 @@ define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_d:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vminu.vv v12, v8, v10
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v10
+; ZVABD-NEXT:    vsub.vv v8, v8, v12
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
   %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
   %sub = sub <vscale x 2 x i128> %a.zext, %b.zext
@@ -263,6 +379,16 @@ define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_d_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v10, v8, v10
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
   %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
   %sub = sub <vscale x 2 x i64> %a.zext, %b.zext
@@ -281,6 +407,15 @@ define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <v
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_non_matching_extension:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v12, v10
+; ZVABD-NEXT:    vminu.vv v10, v8, v12
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v12
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %b.zext = zext <vscale x 4 x i8> %b to <vscale x 4 x i64>
   %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
@@ -302,6 +437,15 @@ define <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_non_matching_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v10, v8
+; ZVABD-NEXT:    vabdu.vv v10, v10, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
   %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
   %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
@@ -321,6 +465,16 @@ define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vs
 ; CHECK-NEXT:    vmax.vv v10, v10, v12
 ; CHECK-NEXT:    vsub.vv v8, v10, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_non_matching_promotion:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v10, v8
+; ZVABD-NEXT:    vsext.vf4 v12, v9
+; ZVABD-NEXT:    vmin.vv v8, v10, v12
+; ZVABD-NEXT:    vmax.vv v10, v10, v12
+; ZVABD-NEXT:    vsub.vv v8, v10, v8
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
   %b.zext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
   %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
@@ -331,3 +485,5 @@ define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vs
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; RV32: {{.*}}
 ; RV64: {{.*}}
+; ZVABD-RV32: {{.*}}
+; ZVABD-RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
index 0f26832cffdc8..998668dc26bb8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV64
 ;
 ; SABD
 ;
@@ -14,6 +16,12 @@ define <8 x i8> @sabd_8b_as_16b(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_8b_as_16b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <8 x i8> %a to <8 x i16>
   %b.sext = sext <8 x i8> %b to <8 x i16>
   %sub = sub <8 x i16> %a.sext, %b.sext
@@ -31,6 +39,12 @@ define <8 x i8> @sabd_8b_as_32b(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_8b_as_32b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <8 x i8> %a to <8 x i32>
   %b.sext = sext <8 x i8> %b to <8 x i32>
   %sub = sub <8 x i32> %a.sext, %b.sext
@@ -48,6 +62,12 @@ define <16 x i8> @sabd_16b(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_16b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <16 x i8> %a to <16 x i16>
   %b.sext = sext <16 x i8> %b to <16 x i16>
   %sub = sub <16 x i16> %a.sext, %b.sext
@@ -65,6 +85,12 @@ define <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_4h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <4 x i16> %a to <4 x i32>
   %b.sext = sext <4 x i16> %b to <4 x i32>
   %sub = sub <4 x i32> %a.sext, %b.sext
@@ -84,6 +110,14 @@ define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_4h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabd.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <4 x i8> %a to <4 x i16>
   %b.sext = sext <4 x i8> %b to <4 x i16>
   %sub = sub <4 x i16> %a.sext, %b.sext
@@ -100,6 +134,12 @@ define <8 x i16> @sabd_8h(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_8h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <8 x i16> %a to <8 x i32>
   %b.sext = sext <8 x i16> %b to <8 x i32>
   %sub = sub <8 x i32> %a.sext, %b.sext
@@ -119,6 +159,14 @@ define <8 x i16> @sabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_8h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <8 x i8> %a to <8 x i16>
   %b.sext = sext <8 x i8> %b to <8 x i16>
   %sub = sub <8 x i16> %a.sext, %b.sext
@@ -135,6 +183,14 @@ define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_2s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <2 x i32> %a to <2 x i64>
   %b.sext = sext <2 x i32> %b to <2 x i64>
   %sub = sub <2 x i64> %a.sext, %b.sext
@@ -154,6 +210,14 @@ define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_2s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabd.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <2 x i16> %a to <2 x i32>
   %b.sext = sext <2 x i16> %b to <2 x i32>
   %sub = sub <2 x i32> %a.sext, %b.sext
@@ -170,6 +234,14 @@ define <4 x i32> @sabd_4s(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_4s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <4 x i32> %a to <4 x i64>
   %b.sext = sext <4 x i32> %b to <4 x i64>
   %sub = sub <4 x i64> %a.sext, %b.sext
@@ -189,6 +261,14 @@ define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_4s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <4 x i16> %a to <4 x i32>
   %b.sext = sext <4 x i16> %b to <4 x i32>
   %sub = sub <4 x i32> %a.sext, %b.sext
@@ -204,6 +284,14 @@ define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_2d:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <2 x i64> %a to <2 x i128>
   %b.sext = sext <2 x i64> %b to <2 x i128>
   %sub = sub <2 x i128> %a.sext, %b.sext
@@ -223,6 +311,16 @@ define <2 x i64> @sabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_2d_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v9, v8, v10
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <2 x i32> %a to <2 x i64>
   %b.sext = sext <2 x i32> %b to <2 x i64>
   %sub = sub <2 x i64> %a.sext, %b.sext
@@ -243,6 +341,12 @@ define <8 x i8> @uabd_8b(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_8b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <8 x i8> %a to <8 x i16>
   %b.zext = zext <8 x i8> %b to <8 x i16>
   %sub = sub <8 x i16> %a.zext, %b.zext
@@ -260,6 +364,12 @@ define <16 x i8> @uabd_16b(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_16b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <16 x i8> %a to <16 x i16>
   %b.zext = zext <16 x i8> %b to <16 x i16>
   %sub = sub <16 x i16> %a.zext, %b.zext
@@ -277,6 +387,12 @@ define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_4h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <4 x i16> %a to <4 x i32>
   %b.zext = zext <4 x i16> %b to <4 x i32>
   %sub = sub <4 x i32> %a.zext, %b.zext
@@ -296,6 +412,14 @@ define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_4h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabdu.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <4 x i8> %a to <4 x i16>
   %b.zext = zext <4 x i8> %b to <4 x i16>
   %sub = sub <4 x i16> %a.zext, %b.zext
@@ -312,6 +436,12 @@ define <8 x i16> @uabd_8h(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_8h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <8 x i16> %a to <8 x i32>
   %b.zext = zext <8 x i16> %b to <8 x i32>
   %sub = sub <8 x i32> %a.zext, %b.zext
@@ -331,6 +461,14 @@ define <8 x i16> @uabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_8h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <8 x i8> %a to <8 x i16>
   %b.zext = zext <8 x i8> %b to <8 x i16>
   %sub = sub <8 x i16> %a.zext, %b.zext
@@ -347,6 +485,14 @@ define <2 x i32> @uabd_2s(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_2s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <2 x i32> %a to <2 x i64>
   %b.zext = zext <2 x i32> %b to <2 x i64>
   %sub = sub <2 x i64> %a.zext, %b.zext
@@ -366,6 +512,14 @@ define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_2s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabdu.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <2 x i16> %a to <2 x i32>
   %b.zext = zext <2 x i16> %b to <2 x i32>
   %sub = sub <2 x i32> %a.zext, %b.zext
@@ -382,6 +536,14 @@ define <4 x i32> @uabd_4s(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_4s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <4 x i32> %a to <4 x i64>
   %b.zext = zext <4 x i32> %b to <4 x i64>
   %sub = sub <4 x i64> %a.zext, %b.zext
@@ -401,6 +563,14 @@ define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_4s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <4 x i16> %a to <4 x i32>
   %b.zext = zext <4 x i16> %b to <4 x i32>
   %sub = sub <4 x i32> %a.zext, %b.zext
@@ -416,6 +586,14 @@ define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_2d:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <2 x i64> %a to <2 x i128>
   %b.zext = zext <2 x i64> %b to <2 x i128>
   %sub = sub <2 x i128> %a.zext, %b.zext
@@ -435,6 +613,16 @@ define <2 x i64> @uabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_2d_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v9, v8, v10
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <2 x i32> %a to <2 x i64>
   %b.zext = zext <2 x i32> %b to <2 x i64>
   %sub = sub <2 x i64> %a.zext, %b.zext
@@ -451,6 +639,14 @@ define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_v16i8_nuw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vsub.vv v8, v8, v9
+; ZVABD-NEXT:    vrsub.vi v9, v8, 0
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nuw <16 x i8> %a, %b
   %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
   ret <16 x i8> %abs
@@ -465,6 +661,14 @@ define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_v8i16_nuw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vsub.vv v8, v8, v9
+; ZVABD-NEXT:    vrsub.vi v9, v8, 0
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nuw <8 x i16> %a, %b
   %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
   ret <8 x i16> %abs
@@ -479,6 +683,14 @@ define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_v4i32_nuw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vsub.vv v8, v8, v9
+; ZVABD-NEXT:    vrsub.vi v9, v8, 0
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nuw <4 x i32> %a, %b
   %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
   ret <4 x i32> %abs
@@ -493,6 +705,14 @@ define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_v2i64_nuw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vsub.vv v8, v8, v9
+; ZVABD-NEXT:    vrsub.vi v9, v8, 0
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nuw <2 x i64> %a, %b
   %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
   ret <2 x i64> %abs
@@ -507,6 +727,12 @@ define <16 x i8> @sabd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_v16i8_nsw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nsw <16 x i8> %a, %b
   %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
   ret <16 x i8> %abs
@@ -521,6 +747,12 @@ define <8 x i16> @sabd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_v8i16_nsw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nsw <8 x i16> %a, %b
   %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
   ret <8 x i16> %abs
@@ -535,6 +767,14 @@ define <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_v4i32_nsw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %sub = sub nsw <4 x i32> %a, %b
   %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
   ret <4 x i32> %abs
@@ -549,6 +789,14 @@ define <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_v2i64_nsw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %sub = sub nsw <2 x i64> %a, %b
   %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
   ret <2 x i64> %abs
@@ -563,6 +811,12 @@ define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: smaxmin_v16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %0, <16 x i8> %1)
   %b = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %0, <16 x i8> %1)
   %sub = sub <16 x i8> %a, %b
@@ -578,6 +832,12 @@ define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: smaxmin_v8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
   %b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
   %sub = sub <8 x i16> %a, %b
@@ -593,6 +853,14 @@ define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: smaxmin_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
   %b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
   %sub = sub <4 x i32> %a, %b
@@ -608,6 +876,14 @@ define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: smaxmin_v2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vmin.vv v10, v8, v9
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
   %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
   %sub = sub <2 x i64> %a, %b
@@ -623,6 +899,12 @@ define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: umaxmin_v16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
   %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %0, <16 x i8> %1)
   %sub = sub <16 x i8> %a, %b
@@ -638,6 +920,12 @@ define <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: umaxmin_v8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <8 x i16> @llvm.umax.v8i16(<8 x i16> %0, <8 x i16> %1)
   %b = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %0, <8 x i16> %1)
   %sub = sub <8 x i16> %a, %b
@@ -653,6 +941,14 @@ define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: umaxmin_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %0, <4 x i32> %1)
   %b = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %0, <4 x i32> %1)
   %sub = sub <4 x i32> %a, %b
@@ -668,6 +964,14 @@ define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: umaxmin_v2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vminu.vv v10, v8, v9
+; ZVABD-NEXT:    vmaxu.vv v8, v8, v9
+; ZVABD-NEXT:    vsub.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
   %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
   %sub = sub <2 x i64> %a, %b
@@ -683,6 +987,12 @@ define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: umaxmin_v16i8_com1:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
   %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %1, <16 x i8> %0)
   %sub = sub <16 x i8> %a, %b
@@ -692,3 +1002,5 @@ define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; RV32: {{.*}}
 ; RV64: {{.*}}
+; ZVABD-RV32: {{.*}}
+; ZVABD-RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
index 71c32f1473b7f..65010fdbdc2bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc < %s -mtriple=riscv32 -mattr=+v | FileCheck %s
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+experimental-zvabd | FileCheck %s --check-prefix=ZVABD
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+experimental-zvabd | FileCheck %s --check-prefix=ZVABD
 
 define signext i16 @sad_4x8_as_i16(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-LABEL: sad_4x8_as_i16:
@@ -16,6 +18,18 @@ define signext i16 @sad_4x8_as_i16(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sad_4x8_as_i16:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vmv.s.x v9, zero
+; ZVABD-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVABD-NEXT:    vwredsumu.vs v8, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
 entry:
   %1 = zext <4 x i8> %a to <4 x i16>
   %3 = zext <4 x i8> %b to <4 x i16>
@@ -38,6 +52,17 @@ define signext i32 @sad_4x8_as_i32(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-NEXT:    vredsum.vs v8, v9, v8
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sad_4x8_as_i32:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v9, v8
+; ZVABD-NEXT:    vmv.s.x v8, zero
+; ZVABD-NEXT:    vredsum.vs v8, v9, v8
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
 entry:
   %1 = zext <4 x i8> %a to <4 x i32>
   %3 = zext <4 x i8> %b to <4 x i32>
@@ -61,6 +86,18 @@ define signext i16 @sad_16x8_as_i16(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sad_16x8_as_i16:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vmv.s.x v9, zero
+; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vwredsumu.vs v8, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
 entry:
   %1 = zext <16 x i8> %a to <16 x i16>
   %3 = zext <16 x i8> %b to <16 x i16>
@@ -83,6 +120,17 @@ define signext i32 @sad_16x8_as_i32(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vredsum.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sad_16x8_as_i32:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v12, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v8, v12
+; ZVABD-NEXT:    vmv.s.x v12, zero
+; ZVABD-NEXT:    vredsum.vs v8, v8, v12
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
 entry:
   %1 = zext <16 x i8> %a to <16 x i32>
   %3 = zext <16 x i8> %b to <16 x i32>
@@ -135,6 +183,41 @@ define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea
 ; CHECK-NEXT:    vredsum.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sad_2block_16xi8_as_i32:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vle8.v v9, (a1)
+; ZVABD-NEXT:    add a0, a0, a2
+; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vle8.v v10, (a0)
+; ZVABD-NEXT:    vle8.v v11, (a1)
+; ZVABD-NEXT:    add a0, a0, a2
+; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vle8.v v14, (a0)
+; ZVABD-NEXT:    vle8.v v15, (a1)
+; ZVABD-NEXT:    add a0, a0, a2
+; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vle8.v v9, (a0)
+; ZVABD-NEXT:    vabdu.vv v10, v10, v11
+; ZVABD-NEXT:    vle8.v v11, (a1)
+; ZVABD-NEXT:    vwaddu.vv v12, v10, v8
+; ZVABD-NEXT:    vabdu.vv v8, v14, v15
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v14, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v16, v9, v11
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vwaddu.vv v8, v14, v12
+; ZVABD-NEXT:    vzext.vf2 v12, v16
+; ZVABD-NEXT:    vwaddu.wv v8, v8, v12
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVABD-NEXT:    vmv.s.x v12, zero
+; ZVABD-NEXT:    vredsum.vs v8, v8, v12
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
 entry:
   %idx.ext8 = sext i32 %strideb to i64
   %idx.ext = sext i32 %stridea to i64

>From 80101863c7aff1750524a84d8c6140004ea1d52d Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Wed, 5 Feb 2025 19:17:16 +0800
Subject: [PATCH 03/10] [RISCV][CodeGen] Lower `ISD::ABS` to Zvabd instructions

We add pseudos/patterns for the `vabs.v` instruction and handle the
lowering in `RISCVTargetLowering::lowerABS`.

Pull Request: https://github.com/llvm/llvm-project/pull/180142
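
For reference, a minimal sketch (not part of the patch; the file and the
function name `abs_example` are illustrative) of the kind of IR this lowering
targets, written in the same style as the abs-sdnode.ll tests below. With
Zvabd the call selects to a single `vabs.v`; without it, the existing
`vrsub.vi`/`vmax.vv` expansion is kept:

  ; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s

  declare <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16>, i1)

  define <vscale x 4 x i16> @abs_example(<vscale x 4 x i16> %v) {
  ; CHECK-LABEL: abs_example:
  ; CHECK:         vsetvli a0, zero, e16, m1, ta, ma
  ; CHECK-NEXT:    vabs.v v8, v8
  ; CHECK-NEXT:    ret
    ; i1 false: INT_MIN input is not treated as poison
    %r = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %v, i1 false)
    ret <vscale x 4 x i16> %r
  }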
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  26 +-
 .../Target/RISCV/RISCVInstrInfoVVLPatterns.td |  20 ++
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td  |  26 ++
 llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td    |  20 --
 llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll     |  94 ++++++
 llvm/test/CodeGen/RISCV/rvv/abs-vp.ll         | 319 ++++++++++++++++++
 .../CodeGen/RISCV/rvv/fixed-vectors-abd.ll    |  12 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll | 247 ++++++++++++++
 .../CodeGen/RISCV/rvv/fixed-vectors-abs.ll    | 107 ++++++
 9 files changed, 833 insertions(+), 38 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 387dc2ba5f388..d46cb575c54c5 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -996,6 +996,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          Legal);
 
       if (Subtarget.hasStdExtZvabd()) {
+        setOperationAction(ISD::ABS, VT, Legal);
         // Only SEW=8/16 are supported in Zvabd.
         if (VT.getVectorElementType() == MVT::i8 ||
             VT.getVectorElementType() == MVT::i16)
@@ -13710,17 +13711,22 @@ SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
   } else
     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
 
-  SDValue SplatZero = DAG.getNode(
-      RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
-      DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
-  SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
-                             DAG.getUNDEF(ContainerVT), Mask, VL);
-  SDValue Max = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
-                            DAG.getUNDEF(ContainerVT), Mask, VL);
-
+  SDValue Result;
+  if (Subtarget.hasStdExtZvabd()) {
+    Result = DAG.getNode(RISCVISD::ABS_VL, DL, ContainerVT, X,
+                         DAG.getUNDEF(ContainerVT), Mask, VL);
+  } else {
+    SDValue SplatZero = DAG.getNode(
+        RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+        DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
+    SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
+                               DAG.getUNDEF(ContainerVT), Mask, VL);
+    Result = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
+                         DAG.getUNDEF(ContainerVT), Mask, VL);
+  }
   if (VT.isFixedLengthVector())
-    Max = convertFromScalableVector(VT, Max, DAG, Subtarget);
-  return Max;
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+  return Result;
 }
 
 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 46b1cefcf6dc0..46dd45876a384 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1955,6 +1955,26 @@ multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
   }
 }
 
+multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
+                         Predicate predicate = HasStdExtZvbb> {
+  foreach vti = AllIntegerVectors in {
+    let Predicates = !listconcat([predicate],
+                                 GetVTypePredicates<vti>.Predicates) in {
+      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
+                                (vti.Vector vti.RegClass:$passthru),
+                                (vti.Mask VMV0:$vm),
+                                VLOpFrag)),
+                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
+                   vti.RegClass:$passthru,
+                   vti.RegClass:$rs1,
+                   (vti.Mask VMV0:$vm),
+                   GPR:$vl,
+                   vti.Log2SEW,
+                   TAIL_AGNOSTIC)>;
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index fa7b188fc7325..139372b70e590 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -29,7 +29,23 @@ let Predicates = [HasStdExtZvabd] in {
 //===----------------------------------------------------------------------===//
 // Pseudos
 //===----------------------------------------------------------------------===//
+
+multiclass PseudoVABS {
+  foreach m = MxList in {
+    defvar mx = m.MX;
+    let VLMul = m.value in {
+      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
+                       SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+      def "_V_" # mx # "_MASK" :
+        VPseudoUnaryMask<m.vrclass, m.vrclass>,
+        RISCVMaskedPseudo<MaskIdx=2>,
+        SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+    }
+  }
+}
+
 let Predicates = [HasStdExtZvabd] in {
+  defm PseudoVABS : PseudoVABS;
   defm PseudoVABD : VPseudoVALU_VV<Commutable = 1>;
   defm PseudoVABDU : VPseudoVALU_VV<Commutable = 1>;
 } // Predicates = [HasStdExtZvabd]
@@ -38,6 +54,7 @@ let Predicates = [HasStdExtZvabd] in {
 // CodeGen Patterns
 //===----------------------------------------------------------------------===//
 let HasPassthruOp = true, HasMaskOp = true in {
+def riscv_abs_vl  : RVSDNode<"ABS_VL", SDT_RISCVIntUnOp_VL>;
 def riscv_abds_vl : RVSDNode<"ABDS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 def riscv_abdu_vl : RVSDNode<"ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 } // let HasPassthruOp = true, HasMaskOp = true
@@ -53,4 +70,13 @@ defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU", ABDIntVectors>;
 
 defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD", ABDIntVectors>;
 defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU", ABDIntVectors>;
+
+foreach vti = AllIntegerVectors in {
+  def : Pat<(vti.Vector (abs (vti.Vector vti.RegClass:$rs2))),
+            (!cast<Instruction>("PseudoVABS_V_"#vti.LMul.MX)
+                    (vti.Vector (IMPLICIT_DEF)),
+                    vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+}
+
+defm : VPatUnaryVL_V<riscv_abs_vl, "PseudoVABS", HasStdExtZvabd>;
 } // Predicates = [HasStdExtZvabd]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 2b94de914b995..3a5ddb8b2b994 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -719,26 +719,6 @@ defm : VPatBinarySDNode_VV_VX<clmulh, "PseudoVCLMULH", I64IntegerVectors, ExtraP
 // VL patterns
 //===----------------------------------------------------------------------===//
 
-multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
-                         Predicate predicate = HasStdExtZvbb> {
-  foreach vti = AllIntegerVectors in {
-    let Predicates = !listconcat([predicate],
-                                 GetVTypePredicates<vti>.Predicates) in {
-      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
-                                (vti.Vector vti.RegClass:$passthru),
-                                (vti.Mask VMV0:$vm),
-                                VLOpFrag)),
-                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
-                   vti.RegClass:$passthru,
-                   vti.RegClass:$rs1,
-                   (vti.Mask VMV0:$vm),
-                   GPR:$vl,
-                   vti.Log2SEW,
-                   TAIL_AGNOSTIC)>;
-    }
-  }
-}
-
 foreach vti = AllIntegerVectors in {
   let Predicates = !listconcat([HasStdExtZvkb],
                                GetVTypePredicates<vti>.Predicates) in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
index 7260cca64a476..868e6766fda00 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
@@ -1,6 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
 
 define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
 ; CHECK-LABEL: vabs_nxv1i16:
@@ -9,6 +13,12 @@ define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16> %v, i1 false)
   ret <vscale x 1 x i16> %r
 }
@@ -20,6 +30,12 @@ define <vscale x 2 x i16> @vabs_nxv2i16(<vscale x 2 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16> %v, i1 false)
   ret <vscale x 2 x i16> %r
 }
@@ -31,6 +47,12 @@ define <vscale x 4 x i16> @vabs_nxv4i16(<vscale x 4 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %v, i1 false)
   ret <vscale x 4 x i16> %r
 }
@@ -42,6 +64,12 @@ define <vscale x 8 x i16> @vabs_nxv8i16(<vscale x 8 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %v, i1 false)
   ret <vscale x 8 x i16> %r
 }
@@ -53,6 +81,12 @@ define <vscale x 16 x i16> @vabs_nxv16i16(<vscale x 16 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %v, i1 false)
   ret <vscale x 16 x i16> %r
 }
@@ -64,6 +98,12 @@ define <vscale x 32 x i16> @vabs_nxv32i16(<vscale x 32 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv32i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16> %v, i1 false)
   ret <vscale x 32 x i16> %r
 }
@@ -75,6 +115,12 @@ define <vscale x 1 x i32> @vabs_nxv1i32(<vscale x 1 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i32> @llvm.abs.nxv1i32(<vscale x 1 x i32> %v, i1 false)
   ret <vscale x 1 x i32> %r
 }
@@ -86,6 +132,12 @@ define <vscale x 2 x i32> @vabs_nxv2i32(<vscale x 2 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> %v, i1 false)
   ret <vscale x 2 x i32> %r
 }
@@ -97,6 +149,12 @@ define <vscale x 4 x i32> @vabs_nxv4i32(<vscale x 4 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %v, i1 false)
   ret <vscale x 4 x i32> %r
 }
@@ -108,6 +166,12 @@ define <vscale x 8 x i32> @vabs_nxv8i32(<vscale x 8 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %v, i1 false)
   ret <vscale x 8 x i32> %r
 }
@@ -119,6 +183,12 @@ define <vscale x 16 x i32> @vabs_nxv16i32(<vscale x 16 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv16i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32> %v, i1 false)
   ret <vscale x 16 x i32> %r
 }
@@ -130,6 +200,12 @@ define <vscale x 1 x i64> @vabs_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i64> @llvm.abs.nxv1i64(<vscale x 1 x i64> %v, i1 false)
   ret <vscale x 1 x i64> %r
 }
@@ -141,6 +217,12 @@ define <vscale x 2 x i64> @vabs_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %v, i1 false)
   ret <vscale x 2 x i64> %r
 }
@@ -152,6 +234,12 @@ define <vscale x 4 x i64> @vabs_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %v, i1 false)
   ret <vscale x 4 x i64> %r
 }
@@ -163,6 +251,12 @@ define <vscale x 8 x i64> @vabs_nxv8i64(<vscale x 8 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> %v, i1 false)
   ret <vscale x 8 x i64> %r
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index 5b215c5173211..684c9abb37353 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -3,6 +3,10 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+experimental-zvabd -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+experimental-zvabd -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
 
 define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_abs_nxv1i8:
@@ -11,6 +15,12 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
@@ -22,6 +32,12 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
@@ -33,6 +49,12 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
@@ -44,6 +66,12 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
@@ -55,6 +83,12 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
@@ -66,6 +100,12 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
@@ -77,6 +117,12 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
@@ -88,6 +134,12 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
@@ -99,6 +151,12 @@ define <vscale x 16 x i8> @vp_abs_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
@@ -110,6 +168,12 @@ define <vscale x 16 x i8> @vp_abs_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
@@ -121,6 +185,12 @@ define <vscale x 32 x i8> @vp_abs_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
@@ -132,6 +202,12 @@ define <vscale x 32 x i8> @vp_abs_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
@@ -143,6 +219,12 @@ define <vscale x 64 x i8> @vp_abs_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv64i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
@@ -154,6 +236,12 @@ define <vscale x 64 x i8> @vp_abs_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv64i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
@@ -165,6 +253,12 @@ define <vscale x 1 x i16> @vp_abs_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
@@ -176,6 +270,12 @@ define <vscale x 1 x i16> @vp_abs_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
@@ -187,6 +287,12 @@ define <vscale x 2 x i16> @vp_abs_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
@@ -198,6 +304,12 @@ define <vscale x 2 x i16> @vp_abs_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
@@ -209,6 +321,12 @@ define <vscale x 4 x i16> @vp_abs_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
@@ -220,6 +338,12 @@ define <vscale x 4 x i16> @vp_abs_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
@@ -231,6 +355,12 @@ define <vscale x 8 x i16> @vp_abs_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
@@ -242,6 +372,12 @@ define <vscale x 8 x i16> @vp_abs_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
@@ -253,6 +389,12 @@ define <vscale x 16 x i16> @vp_abs_nxv16i16(<vscale x 16 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
@@ -264,6 +406,12 @@ define <vscale x 16 x i16> @vp_abs_nxv16i16_unmasked(<vscale x 16 x i16> %va, i3
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
@@ -275,6 +423,12 @@ define <vscale x 32 x i16> @vp_abs_nxv32i16(<vscale x 32 x i16> %va, <vscale x 3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
@@ -286,6 +440,12 @@ define <vscale x 32 x i16> @vp_abs_nxv32i16_unmasked(<vscale x 32 x i16> %va, i3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
@@ -297,6 +457,12 @@ define <vscale x 1 x i32> @vp_abs_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
@@ -308,6 +474,12 @@ define <vscale x 1 x i32> @vp_abs_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
@@ -319,6 +491,12 @@ define <vscale x 2 x i32> @vp_abs_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
@@ -330,6 +508,12 @@ define <vscale x 2 x i32> @vp_abs_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
@@ -341,6 +525,12 @@ define <vscale x 4 x i32> @vp_abs_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
@@ -352,6 +542,12 @@ define <vscale x 4 x i32> @vp_abs_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
@@ -363,6 +559,12 @@ define <vscale x 8 x i32> @vp_abs_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
@@ -374,6 +576,12 @@ define <vscale x 8 x i32> @vp_abs_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
@@ -385,6 +593,12 @@ define <vscale x 16 x i32> @vp_abs_nxv16i32(<vscale x 16 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
@@ -396,6 +610,12 @@ define <vscale x 16 x i32> @vp_abs_nxv16i32_unmasked(<vscale x 16 x i32> %va, i3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
@@ -407,6 +627,12 @@ define <vscale x 1 x i64> @vp_abs_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
@@ -418,6 +644,12 @@ define <vscale x 1 x i64> @vp_abs_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
@@ -429,6 +661,12 @@ define <vscale x 2 x i64> @vp_abs_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
@@ -440,6 +678,12 @@ define <vscale x 2 x i64> @vp_abs_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
@@ -451,6 +695,12 @@ define <vscale x 4 x i64> @vp_abs_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
@@ -462,6 +712,12 @@ define <vscale x 4 x i64> @vp_abs_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
@@ -473,6 +729,12 @@ define <vscale x 7 x i64> @vp_abs_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv7i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x i64> %v
 }
@@ -484,6 +746,12 @@ define <vscale x 7 x i64> @vp_abs_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv7i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
@@ -495,6 +763,12 @@ define <vscale x 8 x i64> @vp_abs_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
@@ -506,6 +780,12 @@ define <vscale x 8 x i64> @vp_abs_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
@@ -534,6 +814,28 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vrsub.vi v24, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v24, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; ZVABD-NEXT:    vmv1r.v v24, v0
+; ZVABD-NEXT:    csrr a1, vlenb
+; ZVABD-NEXT:    srli a2, a1, 3
+; ZVABD-NEXT:    sub a3, a0, a1
+; ZVABD-NEXT:    vslidedown.vx v0, v0, a2
+; ZVABD-NEXT:    sltu a2, a0, a3
+; ZVABD-NEXT:    addi a2, a2, -1
+; ZVABD-NEXT:    and a2, a2, a3
+; ZVABD-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16, v0.t
+; ZVABD-NEXT:    bltu a0, a1, .LBB46_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    mv a0, a1
+; ZVABD-NEXT:  .LBB46_2:
+; ZVABD-NEXT:    vmv1r.v v0, v24
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i64> %v
 }
@@ -557,6 +859,23 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64_unmasked(<vscale x 16 x i64> %va, i3
 ; CHECK-NEXT:    vrsub.vi v24, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v24
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    csrr a1, vlenb
+; ZVABD-NEXT:    sub a2, a0, a1
+; ZVABD-NEXT:    sltu a3, a0, a2
+; ZVABD-NEXT:    addi a3, a3, -1
+; ZVABD-NEXT:    and a2, a3, a2
+; ZVABD-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16
+; ZVABD-NEXT:    bltu a0, a1, .LBB47_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    mv a0, a1
+; ZVABD-NEXT:  .LBB47_2:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
index 998668dc26bb8..6bfac12fa3b99 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -644,8 +644,7 @@ define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <16 x i8> %a, %b
   %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
@@ -666,8 +665,7 @@ define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <8 x i16> %a, %b
   %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
@@ -688,8 +686,7 @@ define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <4 x i32> %a, %b
   %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
@@ -710,8 +707,7 @@ define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <2 x i64> %a, %b
   %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
index fa81e1f6f3514..3a6dc2ba9b9e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
@@ -3,6 +3,10 @@
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+experimental-zvabd -target-abi=ilp32d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+experimental-zvabd -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
 
 define <2 x i8> @vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_abs_v2i8:
@@ -11,6 +15,12 @@ define <2 x i8> @vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
@@ -22,6 +32,12 @@ define <2 x i8> @vp_abs_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
@@ -33,6 +49,12 @@ define <4 x i8> @vp_abs_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
@@ -44,6 +66,12 @@ define <4 x i8> @vp_abs_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
@@ -55,6 +83,12 @@ define <8 x i8> @vp_abs_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
@@ -66,6 +100,12 @@ define <8 x i8> @vp_abs_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
@@ -77,6 +117,12 @@ define <16 x i8> @vp_abs_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
@@ -88,6 +134,12 @@ define <16 x i8> @vp_abs_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
@@ -99,6 +151,12 @@ define <2 x i16> @vp_abs_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
@@ -110,6 +168,12 @@ define <2 x i16> @vp_abs_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
@@ -121,6 +185,12 @@ define <4 x i16> @vp_abs_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
@@ -132,6 +202,12 @@ define <4 x i16> @vp_abs_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
@@ -143,6 +219,12 @@ define <8 x i16> @vp_abs_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
@@ -154,6 +236,12 @@ define <8 x i16> @vp_abs_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
@@ -165,6 +253,12 @@ define <16 x i16> @vp_abs_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
@@ -176,6 +270,12 @@ define <16 x i16> @vp_abs_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
@@ -187,6 +287,12 @@ define <2 x i32> @vp_abs_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
@@ -198,6 +304,12 @@ define <2 x i32> @vp_abs_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
@@ -209,6 +321,12 @@ define <4 x i32> @vp_abs_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
@@ -220,6 +338,12 @@ define <4 x i32> @vp_abs_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
@@ -231,6 +355,12 @@ define <8 x i32> @vp_abs_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
@@ -242,6 +372,12 @@ define <8 x i32> @vp_abs_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
@@ -253,6 +389,12 @@ define <16 x i32> @vp_abs_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
@@ -264,6 +406,12 @@ define <16 x i32> @vp_abs_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
@@ -275,6 +423,12 @@ define <2 x i64> @vp_abs_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
@@ -286,6 +440,12 @@ define <2 x i64> @vp_abs_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
@@ -297,6 +457,12 @@ define <4 x i64> @vp_abs_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
@@ -308,6 +474,12 @@ define <4 x i64> @vp_abs_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
@@ -319,6 +491,12 @@ define <8 x i64> @vp_abs_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
@@ -330,6 +508,12 @@ define <8 x i64> @vp_abs_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
@@ -341,6 +525,12 @@ define <15 x i64> @vp_abs_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v15i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> %m, i32 %evl)
   ret <15 x i64> %v
 }
@@ -352,6 +542,12 @@ define <15 x i64> @vp_abs_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v15i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
@@ -363,6 +559,12 @@ define <16 x i64> @vp_abs_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
@@ -374,6 +576,12 @@ define <16 x i64> @vp_abs_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
@@ -401,6 +609,27 @@ define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v24, v16, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v16, v16, v24, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v32i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    li a2, 16
+; ZVABD-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; ZVABD-NEXT:    vslidedown.vi v24, v0, 2
+; ZVABD-NEXT:    mv a1, a0
+; ZVABD-NEXT:    bltu a0, a2, .LBB34_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    li a1, 16
+; ZVABD-NEXT:  .LBB34_2:
+; ZVABD-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    addi a1, a0, -16
+; ZVABD-NEXT:    sltu a0, a0, a1
+; ZVABD-NEXT:    addi a0, a0, -1
+; ZVABD-NEXT:    and a0, a0, a1
+; ZVABD-NEXT:    vmv1r.v v0, v24
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16, v0.t
+; ZVABD-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
@@ -425,6 +654,24 @@ define <32 x i64> @vp_abs_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v24, v16, 0
 ; CHECK-NEXT:    vmax.vv v16, v16, v24
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v32i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    li a2, 16
+; ZVABD-NEXT:    mv a1, a0
+; ZVABD-NEXT:    bltu a0, a2, .LBB35_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    li a1, 16
+; ZVABD-NEXT:  .LBB35_2:
+; ZVABD-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    addi a1, a0, -16
+; ZVABD-NEXT:    sltu a0, a0, a1
+; ZVABD-NEXT:    addi a0, a0, -1
+; ZVABD-NEXT:    and a0, a0, a1
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16
+; ZVABD-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
index 847722ae6b8ab..05c2d101ea6bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -1,6 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
 
 define void @abs_v16i8(ptr %x) {
 ; CHECK-LABEL: abs_v16i8:
@@ -11,6 +15,14 @@ define void @abs_v16i8(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse8.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <16 x i8>, ptr %x
   %b = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false)
   store <16 x i8> %b, ptr %x
@@ -26,6 +38,14 @@ define void @abs_v8i16(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse16.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <8 x i16>, ptr %x
   %b = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false)
   store <8 x i16> %b, ptr %x
@@ -41,6 +61,14 @@ define void @abs_v6i16(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v6i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse16.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <6 x i16>, ptr %x
   %b = call <6 x i16> @llvm.abs.v6i16(<6 x i16> %a, i1 false)
   store <6 x i16> %b, ptr %x
@@ -56,6 +84,14 @@ define void @abs_v4i32(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vle32.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse32.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i32>, ptr %x
   %b = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false)
   store <4 x i32> %b, ptr %x
@@ -71,6 +107,14 @@ define void @abs_v2i64(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vle64.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <2 x i64>, ptr %x
   %b = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false)
   store <2 x i64> %b, ptr %x
@@ -87,6 +131,15 @@ define void @abs_v32i8(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v32i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    li a1, 32
+; ZVABD-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse8.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <32 x i8>, ptr %x
   %b = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false)
   store <32 x i8> %b, ptr %x
@@ -102,6 +155,14 @@ define void @abs_v16i16(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse16.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <16 x i16>, ptr %x
   %b = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false)
   store <16 x i16> %b, ptr %x
@@ -117,6 +178,14 @@ define void @abs_v8i32(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; ZVABD-NEXT:    vle32.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse32.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <8 x i32>, ptr %x
   %b = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false)
   store <8 x i32> %b, ptr %x
@@ -132,6 +201,14 @@ define void @abs_v4i64(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; ZVABD-NEXT:    vle64.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i64>, ptr %x
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
   store <4 x i64> %b, ptr %x
@@ -149,6 +226,16 @@ define void @abs_v4i64_of_sext_v4i8(ptr %x) {
 ; CHECK-NEXT:    vzext.vf8 v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf8 v8, v10
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i8>, ptr %x
   %a.ext = sext <4 x i8> %a to <4 x i64>
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
@@ -167,6 +254,16 @@ define void @abs_v4i64_of_sext_v4i16(ptr %x) {
 ; CHECK-NEXT:    vzext.vf4 v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v8, v10
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i16>, ptr %x
   %a.ext = sext <4 x i16> %a to <4 x i64>
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
@@ -185,6 +282,16 @@ define void @abs_v4i64_of_sext_v4i32(ptr %x) {
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vle32.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i32>, ptr %x
   %a.ext = sext <4 x i32> %a to <4 x i64>
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)

>From a9cb74a3feeef2d5b2aaf77b6e35b62a7b3ac9ea Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Wed, 5 Nov 2025 18:48:42 +0800
Subject: [PATCH 04/10] [RISCV][TTI] Adjust the cost of `llvm.abs` intrinsic
 when `Zvabd` exists

When `Zvabd` is available, `llvm.abs` is lowered to a single `vabs.v`, so its
cost is 1.

Pull Request: https://github.com/llvm/llvm-project/pull/180146
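
For context, a minimal sketch of the kind of scalar loop this change affects
(the loop and function name are illustrative, not taken from the patch): the
select idiom below is typically canonicalized to `@llvm.abs` by InstCombine,
and with Zvabd the cost model then prices each vector abs the vectorizer forms
as one `vabs.v` instead of a `vrsub.vi`/`vmax.vv` pair.

  #include <cstddef>
  #include <cstdint>

  // Illustrative only: after InstCombine this select usually becomes
  // @llvm.abs.i32, and the loop vectorizer costs the vectorized call
  // through the intrinsic-cost hook adjusted below.
  void abs_i32(int32_t *dst, const int32_t *src, size_t n) {
    for (size_t i = 0; i != n; ++i)
      dst[i] = src[i] < 0 ? -src[i] : src[i];
  }
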
---
 .../Target/RISCV/RISCVTargetTransformInfo.cpp |  5 +++
 llvm/test/Analysis/CostModel/RISCV/abs.ll     | 36 +++++++++++++++++++
 2 files changed, 41 insertions(+)

diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index dc4bf0784e2d1..e83ca19040555 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1577,6 +1577,11 @@ RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
   case Intrinsic::abs: {
     auto LT = getTypeLegalizationCost(RetTy);
     if (ST->hasVInstructions() && LT.second.isVector()) {
+      // vabs.v v10, v8
+      if (ST->hasStdExtZvabd())
+        return LT.first *
+               getRISCVInstructionCost({RISCV::VABS_V}, LT.second, CostKind);
+
       // vrsub.vi v10, v8, 0
       // vmax.vv v8, v8, v10
       return LT.first *
diff --git a/llvm/test/Analysis/CostModel/RISCV/abs.ll b/llvm/test/Analysis/CostModel/RISCV/abs.ll
index b1f93f3811580..80dd006c6ee77 100644
--- a/llvm/test/Analysis/CostModel/RISCV/abs.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/abs.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
 ; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v | FileCheck %s
+; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v,+experimental-zvabd | FileCheck %s --check-prefix=ZVABD
 ; Check that we don't crash querying costs when vectors are not enabled.
 ; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64
 
@@ -38,6 +39,41 @@ define i32 @abs(i32 %arg) {
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %31 = call <vscale x 32 x i8> @llvm.abs.nxv32i8(<vscale x 32 x i8> undef, i1 false)
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %32 = call <vscale x 64 x i8> @llvm.abs.nxv64i8(<vscale x 64 x i8> undef, i1 false)
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
+;
+; ZVABD-LABEL: 'abs'
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %1 = call <2 x i64> @llvm.abs.v2i64(<2 x i64> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %2 = call <4 x i64> @llvm.abs.v4i64(<4 x i64> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %3 = call <8 x i64> @llvm.abs.v8i64(<8 x i64> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %4 = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %5 = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %6 = call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %7 = call <2 x i32> @llvm.abs.v2i32(<2 x i32> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %8 = call <4 x i32> @llvm.abs.v4i32(<4 x i32> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %9 = call <8 x i32> @llvm.abs.v8i32(<8 x i32> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %10 = call <16 x i32> @llvm.abs.v16i32(<16 x i32> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %11 = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %12 = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %13 = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %14 = call <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %15 = call <2 x i16> @llvm.abs.v2i16(<2 x i16> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <4 x i16> @llvm.abs.v4i16(<4 x i16> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %17 = call <8 x i16> @llvm.abs.v8i16(<8 x i16> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %18 = call <16 x i16> @llvm.abs.v16i16(<16 x i16> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %19 = call <32 x i16> @llvm.abs.v32i16(<32 x i16> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %20 = call <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %21 = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %22 = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %23 = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %24 = call <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %25 = call <8 x i8> @llvm.abs.v8i8(<8 x i8> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %26 = call <16 x i8> @llvm.abs.v16i8(<16 x i8> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %27 = call <32 x i8> @llvm.abs.v32i8(<32 x i8> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %28 = call <64 x i8> @llvm.abs.v64i8(<64 x i8> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %29 = call <vscale x 8 x i8> @llvm.abs.nxv8i8(<vscale x 8 x i8> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %30 = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %31 = call <vscale x 32 x i8> @llvm.abs.nxv32i8(<vscale x 32 x i8> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %32 = call <vscale x 64 x i8> @llvm.abs.nxv64i8(<vscale x 64 x i8> undef, i1 false)
+; ZVABD-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
 ;
   call <2 x i64> @llvm.abs.v2i64(<2 x i64> undef, i1 false)
   call <4 x i64> @llvm.abs.v4i64(<4 x i64> undef, i1 false)

>From 853a0c25070f608c6f551789b0e3001ea58e7d4b Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Fri, 6 Feb 2026 17:33:17 +0800
Subject: [PATCH 05/10] [RISCV] Add precommit test for vwabdacc(u) combine

Pull Request: https://github.com/llvm/llvm-project/pull/180161
---
 .../CodeGen/RISCV/rvv/fixed-vectors-sad.ll    | 120 ++++++++++++++++++
 1 file changed, 120 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
index 65010fdbdc2bb..9f6c34cb052ff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -261,3 +261,123 @@ entry:
   ret i32 %op.rdx.3
 }
 
+define signext i32 @sadu_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea, i32 signext %strideb) {
+; CHECK-LABEL: sadu_2block_16xi8_as_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
+; CHECK-NEXT:    add a0, a0, a2
+; CHECK-NEXT:    add a1, a1, a3
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vle8.v v11, (a1)
+; CHECK-NEXT:    add a0, a0, a2
+; CHECK-NEXT:    add a1, a1, a3
+; CHECK-NEXT:    vle8.v v12, (a0)
+; CHECK-NEXT:    vle8.v v13, (a1)
+; CHECK-NEXT:    add a0, a0, a2
+; CHECK-NEXT:    add a1, a1, a3
+; CHECK-NEXT:    vmin.vv v14, v8, v9
+; CHECK-NEXT:    vmax.vv v8, v8, v9
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vsub.vv v8, v8, v14
+; CHECK-NEXT:    vmin.vv v14, v10, v11
+; CHECK-NEXT:    vmax.vv v10, v10, v11
+; CHECK-NEXT:    vle8.v v11, (a1)
+; CHECK-NEXT:    vsub.vv v10, v10, v14
+; CHECK-NEXT:    vmin.vv v14, v12, v13
+; CHECK-NEXT:    vmax.vv v15, v12, v13
+; CHECK-NEXT:    vwaddu.vv v12, v10, v8
+; CHECK-NEXT:    vsub.vv v8, v15, v14
+; CHECK-NEXT:    vmin.vv v10, v9, v11
+; CHECK-NEXT:    vmax.vv v9, v9, v11
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v14, v8
+; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsub.vv v16, v9, v10
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vwaddu.vv v8, v14, v12
+; CHECK-NEXT:    vzext.vf2 v12, v16
+; CHECK-NEXT:    vwaddu.wv v8, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.s.x v12, zero
+; CHECK-NEXT:    vredsum.vs v8, v8, v12
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sadu_2block_16xi8_as_i32:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vle8.v v9, (a1)
+; ZVABD-NEXT:    add a0, a0, a2
+; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vle8.v v10, (a0)
+; ZVABD-NEXT:    vle8.v v11, (a1)
+; ZVABD-NEXT:    add a0, a0, a2
+; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vle8.v v14, (a0)
+; ZVABD-NEXT:    vle8.v v15, (a1)
+; ZVABD-NEXT:    add a0, a0, a2
+; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    vle8.v v9, (a0)
+; ZVABD-NEXT:    vabd.vv v10, v10, v11
+; ZVABD-NEXT:    vle8.v v11, (a1)
+; ZVABD-NEXT:    vwaddu.vv v12, v10, v8
+; ZVABD-NEXT:    vabd.vv v8, v14, v15
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v14, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v16, v9, v11
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vwaddu.vv v8, v14, v12
+; ZVABD-NEXT:    vzext.vf2 v12, v16
+; ZVABD-NEXT:    vwaddu.wv v8, v8, v12
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVABD-NEXT:    vmv.s.x v12, zero
+; ZVABD-NEXT:    vredsum.vs v8, v8, v12
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
+entry:
+  %idx.ext8 = sext i32 %strideb to i64
+  %idx.ext = sext i32 %stridea to i64
+  %0 = load <16 x i8>, ptr %a, align 1
+  %1 = sext <16 x i8> %0 to <16 x i32>
+  %2 = load <16 x i8>, ptr %b, align 1
+  %3 = sext <16 x i8> %2 to <16 x i32>
+  %4 = sub nsw <16 x i32> %1, %3
+  %5 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %4, i1 true)
+  %6 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %5)
+  %add.ptr = getelementptr inbounds i8, ptr %a, i64 %idx.ext
+  %add.ptr9 = getelementptr inbounds i8, ptr %b, i64 %idx.ext8
+  %7 = load <16 x i8>, ptr %add.ptr, align 1
+  %8 = sext <16 x i8> %7 to <16 x i32>
+  %9 = load <16 x i8>, ptr %add.ptr9, align 1
+  %10 = sext <16 x i8> %9 to <16 x i32>
+  %11 = sub nsw <16 x i32> %8, %10
+  %12 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %11, i1 true)
+  %13 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %12)
+  %op.rdx.1 = add i32 %13, %6
+  %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
+  %add.ptr9.1 = getelementptr inbounds i8, ptr %add.ptr9, i64 %idx.ext8
+  %14 = load <16 x i8>, ptr %add.ptr.1, align 1
+  %15 = sext <16 x i8> %14 to <16 x i32>
+  %16 = load <16 x i8>, ptr %add.ptr9.1, align 1
+  %17 = sext <16 x i8> %16 to <16 x i32>
+  %18 = sub nsw <16 x i32> %15, %17
+  %19 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %18, i1 true)
+  %20 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %19)
+  %op.rdx.2 = add i32 %20, %op.rdx.1
+  %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
+  %add.ptr9.2 = getelementptr inbounds i8, ptr %add.ptr9.1, i64 %idx.ext8
+  %21 = load <16 x i8>, ptr %add.ptr.2, align 1
+  %22 = sext <16 x i8> %21 to <16 x i32>
+  %23 = load <16 x i8>, ptr %add.ptr9.2, align 1
+  %24 = sext <16 x i8> %23 to <16 x i32>
+  %25 = sub nsw <16 x i32> %22, %24
+  %26 = tail call <16 x i32> @llvm.abs.v16i32(<16 x i32> %25, i1 true)
+  %27 = tail call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %26)
+  %op.rdx.3 = add i32 %27, %op.rdx.2
+  ret i32 %op.rdx.3
+}

>From 5c9ea394a5fdeb3c8bcb62f0b8ab13171a9bf9df Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Thu, 6 Feb 2025 16:16:04 +0800
Subject: [PATCH 06/10] [RISCV][CodeGen] Combine vwaddu+vabd(u) to vwabdacc(u)

Note that `vwabda.vv`/`vwabdau.vv` (the `vwabdacc(u)` accumulate forms) are
only defined for SEW=8/16, so the combine is restricted to those element
widths.

Pull Request: https://github.com/llvm/llvm-project/pull/180162
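
As a sanity check on the rewrite, here is a per-element scalar sketch of the
unsigned case (my reading of the combine, not code from the patch): `vwaddu`
of an accumulator and a `vabdu` result computes the same SEW=16 value as a
single widening abs-diff-accumulate with the accumulator zero-extended, which
is what makes `vwaddu.vv C, (vabdu.vv A, B)` rewritable to `vwabdau.vv` with
`zext(C)` as the accumulator when the `vwaddu` passthru is undef.

  #include <cassert>
  #include <cstdint>

  // vwaddu.vv of an accumulator and a vabdu.vv result, per element.
  static uint16_t vwaddu_of_vabdu(uint8_t a, uint8_t b, uint8_t c) {
    uint8_t abd = a > b ? a - b : b - a;   // vabdu.vv at SEW=8
    return uint16_t(c) + uint16_t(abd);    // vwaddu.vv widens to SEW=16
  }

  // vwabdau.vv with the accumulator already zero-extended to SEW=16.
  static uint16_t vwabdau(uint8_t a, uint8_t b, uint16_t acc) {
    uint8_t abd = a > b ? a - b : b - a;   // |a - b|, unsigned
    return uint16_t(acc + abd);            // accumulate into the wide element
  }

  int main() {
    for (unsigned a = 0; a < 256; ++a)
      for (unsigned b = 0; b < 256; ++b)
        for (unsigned c = 0; c < 256; ++c)
          assert(vwaddu_of_vabdu(a, b, c) == vwabdau(a, b, uint16_t(c)));
    return 0;
  }
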
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 44 +++++++++++++++++++
 .../Target/RISCV/RISCVInstrInfoVVLPatterns.td | 16 ++++++-
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td  | 22 +++++++++-
 .../CodeGen/RISCV/rvv/fixed-vectors-sad.ll    | 24 +++++-----
 4 files changed, 93 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d46cb575c54c5..171fc391a7aa8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18770,6 +18770,48 @@ static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG) {
                      N->getFlags());
 }
 
+// vwaddu C (vabd A B) -> vwabda(A B C)
+// vwaddu C (vabdu A B) -> vwabdau(A B C)
+static SDValue performVWABDACombine(SDNode *N, SelectionDAG &DAG,
+                                    const RISCVSubtarget &Subtarget) {
+  if (!Subtarget.hasStdExtZvabd())
+    return SDValue();
+
+  MVT VT = N->getSimpleValueType(0);
+  if (VT.getVectorElementType() != MVT::i8 &&
+      VT.getVectorElementType() != MVT::i16)
+    return SDValue();
+
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  SDValue Passthru = N->getOperand(2);
+  if (!Passthru->isUndef())
+    return SDValue();
+
+  SDValue Mask = N->getOperand(3);
+  SDValue VL = N->getOperand(4);
+  auto IsABD = [](SDValue Op) {
+    if (Op->getOpcode() != RISCVISD::ABDS_VL &&
+        Op->getOpcode() != RISCVISD::ABDU_VL)
+      return SDValue();
+    return Op;
+  };
+
+  SDValue Diff = IsABD(Op0);
+  Diff = Diff ? Diff : IsABD(Op1);
+  if (!Diff)
+    return SDValue();
+  SDValue Acc = Diff == Op0 ? Op1 : Op0;
+
+  SDLoc DL(N);
+  Acc = DAG.getNode(RISCVISD::VZEXT_VL, DL, VT, Acc, Mask, VL);
+  SDValue Result = DAG.getNode(
+      Diff.getOpcode() == RISCVISD::ABDS_VL ? RISCVISD::VWABDA_VL
+                                            : RISCVISD::VWABDAU_VL,
+      DL, VT, Diff.getOperand(0), Diff.getOperand(1), Acc, Mask, VL);
+  return Result;
+}
+
 static SDValue performVWADDSUBW_VLCombine(SDNode *N,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const RISCVSubtarget &Subtarget) {
@@ -21681,6 +21723,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     if (SDValue V = combineVqdotAccum(N, DAG, Subtarget))
       return V;
     return combineToVWMACC(N, DAG, Subtarget);
+  case RISCVISD::VWADDU_VL:
+    return performVWABDACombine(N, DAG, Subtarget);
   case RISCVISD::VWADD_W_VL:
   case RISCVISD::VWADDU_W_VL:
   case RISCVISD::VWSUB_W_VL:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 46dd45876a384..d1bcaffdeac5b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1750,8 +1750,9 @@ multiclass VPatMultiplyAddVL_VV_VX<SDNode op, string instruction_name> {
   }
 }
 
-multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> {
-  foreach vtiTowti = AllWidenableIntVectors in {
+multiclass VPatWidenMultiplyAddVL_VV<SDNode vwmacc_op, string instr_name,
+                                     list<VTypeInfoToWide> vtilist = AllWidenableIntVectors> {
+  foreach vtiTowti = vtilist in {
     defvar vti = vtiTowti.Vti;
     defvar wti = vtiTowti.Wti;
     let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
@@ -1763,6 +1764,17 @@ multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> {
                 (!cast<Instruction>(instr_name#"_VV_"#vti.LMul.MX#"_MASK")
                     wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+    }
+  }
+}
+
+multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name>
+    : VPatWidenMultiplyAddVL_VV<vwmacc_op, instr_name> {
+  foreach vtiTowti = AllWidenableIntVectors in {
+    defvar vti = vtiTowti.Vti;
+    defvar wti = vtiTowti.Wti;
+    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+                                 GetVTypePredicates<wti>.Predicates) in {
       def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1),
                            (vti.Vector vti.RegClass:$rs2),
                            (wti.Vector wti.RegClass:$rd),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 139372b70e590..46261d83711cc 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -29,7 +29,6 @@ let Predicates = [HasStdExtZvabd] in {
 //===----------------------------------------------------------------------===//
 // Pseudos
 //===----------------------------------------------------------------------===//
-
 multiclass PseudoVABS {
   foreach m = MxList in {
     defvar mx = m.MX;
@@ -44,10 +43,23 @@ multiclass PseudoVABS {
   }
 }
 
+multiclass VPseudoVWABD_VV {
+  foreach m = MxListW in {
+    defvar mx = m.MX;
+    defm "" : VPseudoTernaryW_VV<m, Commutable = 1>,
+              SchedTernary<"WriteVIWMulAddV", "ReadVIWMulAddV",
+                           "ReadVIWMulAddV", "ReadVIWMulAddV", mx>;
+  }
+}
+
 let Predicates = [HasStdExtZvabd] in {
   defm PseudoVABS : PseudoVABS;
   defm PseudoVABD : VPseudoVALU_VV<Commutable = 1>;
   defm PseudoVABDU : VPseudoVALU_VV<Commutable = 1>;
+  let IsRVVWideningReduction = 1 in {
+    defm PseudoVWABDA : VPseudoVWABD_VV;
+    defm PseudoVWABDAU : VPseudoVWABD_VV;
+  } // IsRVVWideningReduction = 1
 } // Predicates = [HasStdExtZvabd]
 
 //===----------------------------------------------------------------------===//
@@ -57,12 +69,17 @@ let HasPassthruOp = true, HasMaskOp = true in {
 def riscv_abs_vl  : RVSDNode<"ABS_VL", SDT_RISCVIntUnOp_VL>;
 def riscv_abds_vl : RVSDNode<"ABDS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 def riscv_abdu_vl : RVSDNode<"ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def rvv_vwabda_vl  : RVSDNode<"VWABDA_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
+def rvv_vwabdau_vl : RVSDNode<"VWABDAU_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
 } // let HasPassthruOp = true, HasMaskOp = true
 
 // These instructions are defined for SEW=8 and SEW=16, otherwise the instruction
 // encoding is reserved.
 defvar ABDIntVectors = !filter(vti, AllIntegerVectors, !or(!eq(vti.SEW, 8),
                                                            !eq(vti.SEW, 16)));
+defvar ABDAIntVectors = !filter(vtiTowti, AllWidenableIntVectors,
+                                          !or(!eq(vtiTowti.Vti.SEW, 8),
+                                              !eq(vtiTowti.Vti.SEW, 16)));
 
 let Predicates = [HasStdExtZvabd] in {
 defm : VPatBinarySDNode_VV<abds, "PseudoVABD", ABDIntVectors>;
@@ -79,4 +96,7 @@ foreach vti = AllIntegerVectors in {
 }
 
 defm : VPatUnaryVL_V<riscv_abs_vl, "PseudoVABS", HasStdExtZvabd>;
+
+defm : VPatWidenMultiplyAddVL_VV<rvv_vwabda_vl, "PseudoVWABDA", ABDAIntVectors>;
+defm : VPatWidenMultiplyAddVL_VV<rvv_vwabdau_vl, "PseudoVWABDAU", ABDAIntVectors>;
 } // Predicates = [HasStdExtZvabd]
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
index 9f6c34cb052ff..dcb8b31c682b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -199,16 +199,18 @@ define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea
 ; ZVABD-NEXT:    vle8.v v15, (a1)
 ; ZVABD-NEXT:    add a0, a0, a2
 ; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vle8.v v16, (a0)
+; ZVABD-NEXT:    vle8.v v17, (a1)
 ; ZVABD-NEXT:    vabdu.vv v8, v8, v9
-; ZVABD-NEXT:    vle8.v v9, (a0)
-; ZVABD-NEXT:    vabdu.vv v10, v10, v11
-; ZVABD-NEXT:    vle8.v v11, (a1)
-; ZVABD-NEXT:    vwaddu.vv v12, v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v12, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vwabdau.vv v12, v10, v11
 ; ZVABD-NEXT:    vabdu.vv v8, v14, v15
 ; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v14, v8
 ; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; ZVABD-NEXT:    vabdu.vv v16, v9, v11
+; ZVABD-NEXT:    vabdu.vv v16, v16, v17
 ; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVABD-NEXT:    vwaddu.vv v8, v14, v12
 ; ZVABD-NEXT:    vzext.vf2 v12, v16
@@ -320,16 +322,18 @@ define signext i32 @sadu_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stride
 ; ZVABD-NEXT:    vle8.v v15, (a1)
 ; ZVABD-NEXT:    add a0, a0, a2
 ; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vle8.v v16, (a0)
+; ZVABD-NEXT:    vle8.v v17, (a1)
 ; ZVABD-NEXT:    vabd.vv v8, v8, v9
-; ZVABD-NEXT:    vle8.v v9, (a0)
-; ZVABD-NEXT:    vabd.vv v10, v10, v11
-; ZVABD-NEXT:    vle8.v v11, (a1)
-; ZVABD-NEXT:    vwaddu.vv v12, v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v12, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vwabda.vv v12, v10, v11
 ; ZVABD-NEXT:    vabd.vv v8, v14, v15
 ; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v14, v8
 ; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; ZVABD-NEXT:    vabd.vv v16, v9, v11
+; ZVABD-NEXT:    vabd.vv v16, v16, v17
 ; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVABD-NEXT:    vwaddu.vv v8, v14, v12
 ; ZVABD-NEXT:    vzext.vf2 v12, v16

>From 633c8df9fc36fb3f9a0bf0d154fb3b567f2cef95 Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Wed, 5 Nov 2025 18:57:04 +0800
Subject: [PATCH 07/10] [RISCV] Add precommit test for vwaddu_wv+vabd(u) to
 vwabdacc(u) combine

---
 llvm/test/CodeGen/RISCV/rvv/vectors-sad.ll | 327 +++++++++++++++++++++
 1 file changed, 327 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vectors-sad.ll

diff --git a/llvm/test/CodeGen/RISCV/rvv/vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/vectors-sad.ll
new file mode 100644
index 0000000000000..f32c9b72dc30a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vectors-sad.ll
@@ -0,0 +1,327 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v | FileCheck %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+experimental-zvabd | FileCheck %s --check-prefix=ZVABD
+
+define signext i32 @PIXEL_SAD_C(ptr %pix1, i32 %i_stride_pix1, ptr %pix2, i32 %i_stride_pix2, i32 %i_width, i32 %i_height){
+; CHECK-LABEL: PIXEL_SAD_C:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    sext.w a6, a5
+; CHECK-NEXT:    blez a6, .LBB0_12
+; CHECK-NEXT:  # %bb.1: # %for.cond1.preheader.lr.ph
+; CHECK-NEXT:    addi sp, sp, -32
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    sd s0, 24(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s1, 16(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    sd s2, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_offset s0, -8
+; CHECK-NEXT:    .cfi_offset s1, -16
+; CHECK-NEXT:    .cfi_offset s2, -24
+; CHECK-NEXT:    li a7, 0
+; CHECK-NEXT:    li t0, 0
+; CHECK-NEXT:    li a5, 0
+; CHECK-NEXT:    sext.w a1, a1
+; CHECK-NEXT:    sext.w a3, a3
+; CHECK-NEXT:    slli t1, a4, 32
+; CHECK-NEXT:    csrr t2, vlenb
+; CHECK-NEXT:    sext.w a4, a4
+; CHECK-NEXT:    vsetvli t3, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.i v12, 0
+; CHECK-NEXT:    vsetvli t3, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    srli t1, t1, 32
+; CHECK-NEXT:    srli t2, t2, 1
+; CHECK-NEXT:    neg t3, t2
+; CHECK-NEXT:    and t3, t3, t1
+; CHECK-NEXT:    vmv.s.x v13, zero
+; CHECK-NEXT:    mv t4, a2
+; CHECK-NEXT:    j .LBB0_3
+; CHECK-NEXT:  .LBB0_2: # %for.cond.cleanup3
+; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:    add t4, t4, a3
+; CHECK-NEXT:    addiw t0, t0, 1
+; CHECK-NEXT:    addi a7, a7, 1
+; CHECK-NEXT:    beq t0, a6, .LBB0_11
+; CHECK-NEXT:  .LBB0_3: # %for.cond1.preheader
+; CHECK-NEXT:    # =>This Loop Header: Depth=1
+; CHECK-NEXT:    # Child Loop BB0_7 Depth 2
+; CHECK-NEXT:    # Child Loop BB0_10 Depth 2
+; CHECK-NEXT:    blez a4, .LBB0_2
+; CHECK-NEXT:  # %bb.4: # %for.body4.preheader
+; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT:    bgeu t1, t2, .LBB0_6
+; CHECK-NEXT:  # %bb.5: # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT:    li s0, 0
+; CHECK-NEXT:    j .LBB0_9
+; CHECK-NEXT:  .LBB0_6: # %vector.ph
+; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
+; CHECK-NEXT:    vmv.s.x v14, a5
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v10, v14
+; CHECK-NEXT:    mv a5, a0
+; CHECK-NEXT:    mv t5, t4
+; CHECK-NEXT:    mv t6, t3
+; CHECK-NEXT:  .LBB0_7: # %vector.body
+; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
+; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v14, (a5)
+; CHECK-NEXT:    vle8.v v15, (t5)
+; CHECK-NEXT:    sub t6, t6, t2
+; CHECK-NEXT:    add t5, t5, t2
+; CHECK-NEXT:    vminu.vv v16, v14, v15
+; CHECK-NEXT:    vmaxu.vv v14, v14, v15
+; CHECK-NEXT:    vsub.vv v14, v14, v16
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vzext.vf2 v15, v14
+; CHECK-NEXT:    vwaddu.wv v10, v10, v15
+; CHECK-NEXT:    add a5, a5, t2
+; CHECK-NEXT:    bnez t6, .LBB0_7
+; CHECK-NEXT:  # %bb.8: # %middle.block
+; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vredsum.vs v10, v10, v13
+; CHECK-NEXT:    vmv.x.s a5, v10
+; CHECK-NEXT:    mv s0, t3
+; CHECK-NEXT:    beq t3, t1, .LBB0_2
+; CHECK-NEXT:  .LBB0_9: # %for.body4.preheader31
+; CHECK-NEXT:    # in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT:    mul t6, a3, a7
+; CHECK-NEXT:    add t5, t4, s0
+; CHECK-NEXT:    add t6, t1, t6
+; CHECK-NEXT:    add t6, a2, t6
+; CHECK-NEXT:    add s0, a0, s0
+; CHECK-NEXT:  .LBB0_10: # %for.body4
+; CHECK-NEXT:    # Parent Loop BB0_3 Depth=1
+; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
+; CHECK-NEXT:    lbu s1, 0(s0)
+; CHECK-NEXT:    lbu s2, 0(t5)
+; CHECK-NEXT:    addi t5, t5, 1
+; CHECK-NEXT:    sub s1, s1, s2
+; CHECK-NEXT:    srai s2, s1, 63
+; CHECK-NEXT:    xor s1, s1, s2
+; CHECK-NEXT:    sub a5, s2, a5
+; CHECK-NEXT:    subw a5, s1, a5
+; CHECK-NEXT:    addi s0, s0, 1
+; CHECK-NEXT:    bne t5, t6, .LBB0_10
+; CHECK-NEXT:    j .LBB0_2
+; CHECK-NEXT:  .LBB0_11:
+; CHECK-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    .cfi_restore s0
+; CHECK-NEXT:    .cfi_restore s1
+; CHECK-NEXT:    .cfi_restore s2
+; CHECK-NEXT:    addi sp, sp, 32
+; CHECK-NEXT:    .cfi_def_cfa_offset 0
+; CHECK-NEXT:    mv a0, a5
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_12:
+; CHECK-NEXT:    li a0, 0
+; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: PIXEL_SAD_C:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    sext.w a6, a5
+; ZVABD-NEXT:    blez a6, .LBB0_12
+; ZVABD-NEXT:  # %bb.1: # %for.cond1.preheader.lr.ph
+; ZVABD-NEXT:    addi sp, sp, -32
+; ZVABD-NEXT:    .cfi_def_cfa_offset 32
+; ZVABD-NEXT:    sd s0, 24(sp) # 8-byte Folded Spill
+; ZVABD-NEXT:    sd s1, 16(sp) # 8-byte Folded Spill
+; ZVABD-NEXT:    sd s2, 8(sp) # 8-byte Folded Spill
+; ZVABD-NEXT:    .cfi_offset s0, -8
+; ZVABD-NEXT:    .cfi_offset s1, -16
+; ZVABD-NEXT:    .cfi_offset s2, -24
+; ZVABD-NEXT:    li a7, 0
+; ZVABD-NEXT:    li t0, 0
+; ZVABD-NEXT:    li a5, 0
+; ZVABD-NEXT:    sext.w a1, a1
+; ZVABD-NEXT:    sext.w a3, a3
+; ZVABD-NEXT:    slli t1, a4, 32
+; ZVABD-NEXT:    csrr t2, vlenb
+; ZVABD-NEXT:    sext.w a4, a4
+; ZVABD-NEXT:    vsetvli t3, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vmv.v.i v12, 0
+; ZVABD-NEXT:    vsetvli t3, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vmv.v.i v8, 0
+; ZVABD-NEXT:    srli t1, t1, 32
+; ZVABD-NEXT:    srli t2, t2, 1
+; ZVABD-NEXT:    neg t3, t2
+; ZVABD-NEXT:    and t3, t3, t1
+; ZVABD-NEXT:    vmv.s.x v13, zero
+; ZVABD-NEXT:    mv t4, a2
+; ZVABD-NEXT:    j .LBB0_3
+; ZVABD-NEXT:  .LBB0_2: # %for.cond.cleanup3
+; ZVABD-NEXT:    # in Loop: Header=BB0_3 Depth=1
+; ZVABD-NEXT:    add a0, a0, a1
+; ZVABD-NEXT:    add t4, t4, a3
+; ZVABD-NEXT:    addiw t0, t0, 1
+; ZVABD-NEXT:    addi a7, a7, 1
+; ZVABD-NEXT:    beq t0, a6, .LBB0_11
+; ZVABD-NEXT:  .LBB0_3: # %for.cond1.preheader
+; ZVABD-NEXT:    # =>This Loop Header: Depth=1
+; ZVABD-NEXT:    # Child Loop BB0_7 Depth 2
+; ZVABD-NEXT:    # Child Loop BB0_10 Depth 2
+; ZVABD-NEXT:    blez a4, .LBB0_2
+; ZVABD-NEXT:  # %bb.4: # %for.body4.preheader
+; ZVABD-NEXT:    # in Loop: Header=BB0_3 Depth=1
+; ZVABD-NEXT:    bgeu t1, t2, .LBB0_6
+; ZVABD-NEXT:  # %bb.5: # in Loop: Header=BB0_3 Depth=1
+; ZVABD-NEXT:    li s0, 0
+; ZVABD-NEXT:    j .LBB0_9
+; ZVABD-NEXT:  .LBB0_6: # %vector.ph
+; ZVABD-NEXT:    # in Loop: Header=BB0_3 Depth=1
+; ZVABD-NEXT:    vmv1r.v v14, v12
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
+; ZVABD-NEXT:    vmv.s.x v14, a5
+; ZVABD-NEXT:    vmv2r.v v10, v8
+; ZVABD-NEXT:    vmv1r.v v10, v14
+; ZVABD-NEXT:    mv a5, a0
+; ZVABD-NEXT:    mv t5, t4
+; ZVABD-NEXT:    mv t6, t3
+; ZVABD-NEXT:  .LBB0_7: # %vector.body
+; ZVABD-NEXT:    # Parent Loop BB0_3 Depth=1
+; ZVABD-NEXT:    # => This Inner Loop Header: Depth=2
+; ZVABD-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
+; ZVABD-NEXT:    vle8.v v14, (a5)
+; ZVABD-NEXT:    vle8.v v15, (t5)
+; ZVABD-NEXT:    sub t6, t6, t2
+; ZVABD-NEXT:    add t5, t5, t2
+; ZVABD-NEXT:    vabdu.vv v14, v14, v15
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v15, v14
+; ZVABD-NEXT:    vwaddu.wv v10, v10, v15
+; ZVABD-NEXT:    add a5, a5, t2
+; ZVABD-NEXT:    bnez t6, .LBB0_7
+; ZVABD-NEXT:  # %bb.8: # %middle.block
+; ZVABD-NEXT:    # in Loop: Header=BB0_3 Depth=1
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vredsum.vs v10, v10, v13
+; ZVABD-NEXT:    vmv.x.s a5, v10
+; ZVABD-NEXT:    mv s0, t3
+; ZVABD-NEXT:    beq t3, t1, .LBB0_2
+; ZVABD-NEXT:  .LBB0_9: # %for.body4.preheader31
+; ZVABD-NEXT:    # in Loop: Header=BB0_3 Depth=1
+; ZVABD-NEXT:    mul t6, a3, a7
+; ZVABD-NEXT:    add t5, t4, s0
+; ZVABD-NEXT:    add t6, t1, t6
+; ZVABD-NEXT:    add t6, a2, t6
+; ZVABD-NEXT:    add s0, a0, s0
+; ZVABD-NEXT:  .LBB0_10: # %for.body4
+; ZVABD-NEXT:    # Parent Loop BB0_3 Depth=1
+; ZVABD-NEXT:    # => This Inner Loop Header: Depth=2
+; ZVABD-NEXT:    lbu s1, 0(s0)
+; ZVABD-NEXT:    lbu s2, 0(t5)
+; ZVABD-NEXT:    addi t5, t5, 1
+; ZVABD-NEXT:    sub s1, s1, s2
+; ZVABD-NEXT:    srai s2, s1, 63
+; ZVABD-NEXT:    xor s1, s1, s2
+; ZVABD-NEXT:    sub a5, s2, a5
+; ZVABD-NEXT:    subw a5, s1, a5
+; ZVABD-NEXT:    addi s0, s0, 1
+; ZVABD-NEXT:    bne t5, t6, .LBB0_10
+; ZVABD-NEXT:    j .LBB0_2
+; ZVABD-NEXT:  .LBB0_11:
+; ZVABD-NEXT:    ld s0, 24(sp) # 8-byte Folded Reload
+; ZVABD-NEXT:    ld s1, 16(sp) # 8-byte Folded Reload
+; ZVABD-NEXT:    ld s2, 8(sp) # 8-byte Folded Reload
+; ZVABD-NEXT:    .cfi_restore s0
+; ZVABD-NEXT:    .cfi_restore s1
+; ZVABD-NEXT:    .cfi_restore s2
+; ZVABD-NEXT:    addi sp, sp, 32
+; ZVABD-NEXT:    .cfi_def_cfa_offset 0
+; ZVABD-NEXT:    mv a0, a5
+; ZVABD-NEXT:    ret
+; ZVABD-NEXT:  .LBB0_12:
+; ZVABD-NEXT:    li a0, 0
+; ZVABD-NEXT:    ret
+entry:
+  %cmp23 = icmp sgt i32 %i_height, 0
+  br i1 %cmp23, label %for.cond1.preheader.lr.ph, label %for.cond.cleanup
+
+for.cond1.preheader.lr.ph:                        ; preds = %entry
+  %cmp220 = icmp sgt i32 %i_width, 0
+  %idx.ext = sext i32 %i_stride_pix1 to i64
+  %idx.ext8 = sext i32 %i_stride_pix2 to i64
+  %wide.trip.count = zext i32 %i_width to i64
+  br label %for.cond1.preheader
+
+for.cond1.preheader:                              ; preds = %for.cond1.preheader.lr.ph, %for.cond.cleanup3
+  %y.027 = phi i32 [ 0, %for.cond1.preheader.lr.ph ], [ %inc11, %for.cond.cleanup3 ]
+  %i_sum.026 = phi i32 [ 0, %for.cond1.preheader.lr.ph ], [ %i_sum.1.lcssa, %for.cond.cleanup3 ]
+  %pix1.addr.025 = phi ptr [ %pix1, %for.cond1.preheader.lr.ph ], [ %add.ptr, %for.cond.cleanup3 ]
+  %pix2.addr.024 = phi ptr [ %pix2, %for.cond1.preheader.lr.ph ], [ %add.ptr9, %for.cond.cleanup3 ]
+  br i1 %cmp220, label %for.body4.preheader, label %for.cond.cleanup3
+
+for.body4.preheader:                              ; preds = %for.cond1.preheader
+  %0 = tail call i64 @llvm.vscale.i64()
+  %1 = shl nuw nsw i64 %0, 2
+  %min.iters.check = icmp samesign ugt i64 %1, %wide.trip.count
+  br i1 %min.iters.check, label %for.body4.preheader31, label %vector.ph
+
+vector.ph:                                        ; preds = %for.body4.preheader
+  %2 = tail call i64 @llvm.vscale.i64()
+  %.neg = mul nuw nsw i64 %2, 2147483644
+  %n.vec = and i64 %.neg, %wide.trip.count
+  %3 = tail call i64 @llvm.vscale.i64()
+  %4 = shl nuw nsw i64 %3, 2
+  %5 = insertelement <vscale x 4 x i32> zeroinitializer, i32 %i_sum.026, i64 0
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %vec.phi = phi <vscale x 4 x i32> [ %5, %vector.ph ], [ %12, %vector.body ]
+  %6 = getelementptr inbounds nuw i8, ptr %pix1.addr.025, i64 %index
+  %wide.load = load <vscale x 4 x i8>, ptr %6, align 1
+  %7 = zext <vscale x 4 x i8> %wide.load to <vscale x 4 x i32>
+  %8 = getelementptr inbounds nuw i8, ptr %pix2.addr.024, i64 %index
+  %wide.load30 = load <vscale x 4 x i8>, ptr %8, align 1
+  %9 = zext <vscale x 4 x i8> %wide.load30 to <vscale x 4 x i32>
+  %10 = sub nsw <vscale x 4 x i32> %7, %9
+  %11 = tail call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %10, i1 true)
+  %12 = add <vscale x 4 x i32> %11, %vec.phi
+  %index.next = add nuw i64 %index, %4
+  %13 = icmp eq i64 %index.next, %n.vec
+  br i1 %13, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %14 = tail call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %12)
+  %cmp.n = icmp eq i64 %n.vec, %wide.trip.count
+  br i1 %cmp.n, label %for.cond.cleanup3, label %for.body4.preheader31
+
+for.body4.preheader31:                            ; preds = %for.body4.preheader, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %for.body4.preheader ], [ %n.vec, %middle.block ]
+  %i_sum.121.ph = phi i32 [ %i_sum.026, %for.body4.preheader ], [ %14, %middle.block ]
+  br label %for.body4
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup3, %entry
+  %i_sum.0.lcssa = phi i32 [ 0, %entry ], [ %i_sum.1.lcssa, %for.cond.cleanup3 ]
+  ret i32 %i_sum.0.lcssa
+
+for.cond.cleanup3:                                ; preds = %for.body4, %middle.block, %for.cond1.preheader
+  %i_sum.1.lcssa = phi i32 [ %i_sum.026, %for.cond1.preheader ], [ %14, %middle.block ], [ %add, %for.body4 ]
+  %add.ptr = getelementptr inbounds i8, ptr %pix1.addr.025, i64 %idx.ext
+  %add.ptr9 = getelementptr inbounds i8, ptr %pix2.addr.024, i64 %idx.ext8
+  %inc11 = add nuw nsw i32 %y.027, 1
+  %exitcond29.not = icmp eq i32 %inc11, %i_height
+  br i1 %exitcond29.not, label %for.cond.cleanup, label %for.cond1.preheader
+
+for.body4:                                        ; preds = %for.body4.preheader31, %for.body4
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body4 ], [ %indvars.iv.ph, %for.body4.preheader31 ]
+  %i_sum.121 = phi i32 [ %add, %for.body4 ], [ %i_sum.121.ph, %for.body4.preheader31 ]
+  %arrayidx = getelementptr inbounds nuw i8, ptr %pix1.addr.025, i64 %indvars.iv
+  %15 = load i8, ptr %arrayidx, align 1
+  %conv = zext i8 %15 to i32
+  %arrayidx6 = getelementptr inbounds nuw i8, ptr %pix2.addr.024, i64 %indvars.iv
+  %16 = load i8, ptr %arrayidx6, align 1
+  %conv7 = zext i8 %16 to i32
+  %sub = sub nsw i32 %conv, %conv7
+  %17 = tail call i32 @llvm.abs.i32(i32 %sub, i1 true)
+  %add = add nsw i32 %17, %i_sum.121
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup3, label %for.body4
+}

>From 502adb300039c7863f87681ff7cef727f7e26f63 Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Wed, 5 Nov 2025 19:04:30 +0800
Subject: [PATCH 08/10] [RISCV] Combine vwaddu_wv+vabd(u) to vwabdacc(u)

Note that only SEW=8/16 is supported for `vwabdacc(u)`.
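
To illustrate, here is a minimal C sketch of the kind of loop this combine
targets (the function is made up for illustration and mirrors the PIXEL_SAD_C
test below). The loop vectorizer lowers the absolute difference plus widening
accumulate into `vabdu.vv` followed by `vwaddu.wv`, which this combine can
then fold into a single `vwabdau.vv`:

  #include <stdint.h>

  // Sum of absolute differences over two byte buffers.
  uint32_t sad_u8(const uint8_t *a, const uint8_t *b, int n) {
    uint32_t sum = 0;
    for (int i = 0; i < n; i++) {
      int d = (int)a[i] - (int)b[i];      // widen first to avoid wraparound
      sum += (uint32_t)(d < 0 ? -d : d);  // accumulate the absolute difference
    }
    return sum;
  }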
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 63 ++++++++++++++++++++-
 llvm/test/CodeGen/RISCV/rvv/vectors-sad.ll  | 13 ++---
 2 files changed, 68 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 171fc391a7aa8..37dc08e600e8b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -18812,6 +18812,64 @@ static SDValue performVWABDACombine(SDNode *N, SelectionDAG &DAG,
   return Result;
 }
 
+// vwaddu_wv C (vabd A B) -> vwabda(A B C)
+// vwaddu_wv C (vabdu A B) -> vwabdau(A B C)
+static SDValue performVWABDACombine_WV(SDNode *N, SelectionDAG &DAG,
+                                       const RISCVSubtarget &Subtarget) {
+  if (!Subtarget.hasStdExtZvabd())
+    return SDValue();
+
+  MVT VT = N->getSimpleValueType(0);
+  // The result is widened, so we can accept i32 here.
+  if (VT.getVectorElementType() == MVT::i64)
+    return SDValue();
+
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  SDValue Passthru = N->getOperand(2);
+  if (!Passthru->isUndef())
+    return SDValue();
+
+  SDValue Mask = N->getOperand(3);
+  SDValue VL = N->getOperand(4);
+  unsigned Ext = 0;
+  MVT ExtVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
+  auto IsABD = [&](SDValue Op) {
+    unsigned Opc = Op.getOpcode();
+    if (Opc != ISD::ABDS && Opc != ISD::ABDU) {
+      if ((Opc == RISCVISD::VZEXT_VL &&
+           Op->getOperand(0).getOpcode() == ISD::ABDU) ||
+          (Opc == RISCVISD::VSEXT_VL &&
+           Op->getOperand(0).getOpcode() == ISD::ABDS)) {
+        Ext = Opc;
+        ExtVT = Op->getSimpleValueType(0);
+        return Op->getOperand(0);
+      }
+      return SDValue();
+    }
+    return Op;
+  };
+
+  SDValue Diff = IsABD(Op0);
+  Diff = Diff ? Diff : IsABD(Op1);
+  if (!Diff)
+    return SDValue();
+  SDValue Acc = Diff == Op0 ? Op1 : Op0;
+
+  SDLoc DL(N);
+  SDValue DiffA = Diff.getOperand(0);
+  SDValue DiffB = Diff.getOperand(1);
+  if (Ext) {
+    DiffA = DAG.getNode(Ext, DL, ExtVT, DiffA, Mask, VL);
+    DiffB = DAG.getNode(Ext, DL, ExtVT, DiffB, Mask, VL);
+  }
+  SDValue Result =
+      DAG.getNode(Diff.getOpcode() == ISD::ABDS ? RISCVISD::VWABDA_VL
+                                                : RISCVISD::VWABDAU_VL,
+                  DL, VT, DiffA, DiffB, Acc, Mask, VL);
+  return Result;
+}
+
 static SDValue performVWADDSUBW_VLCombine(SDNode *N,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const RISCVSubtarget &Subtarget) {
@@ -21725,8 +21783,11 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     return combineToVWMACC(N, DAG, Subtarget);
   case RISCVISD::VWADDU_VL:
     return performVWABDACombine(N, DAG, Subtarget);
-  case RISCVISD::VWADD_W_VL:
   case RISCVISD::VWADDU_W_VL:
+    if (SDValue V = performVWABDACombine_WV(N, DAG, Subtarget))
+      return V;
+    [[fallthrough]];
+  case RISCVISD::VWADD_W_VL:
   case RISCVISD::VWSUB_W_VL:
   case RISCVISD::VWSUBU_W_VL:
     return performVWADDSUBW_VLCombine(N, DCI, Subtarget);
diff --git a/llvm/test/CodeGen/RISCV/rvv/vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/vectors-sad.ll
index f32c9b72dc30a..f4af2fdc4cd72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vectors-sad.ll
@@ -185,15 +185,14 @@ define signext i32 @PIXEL_SAD_C(ptr %pix1, i32 %i_stride_pix1, ptr %pix2, i32 %i
 ; ZVABD-NEXT:  .LBB0_7: # %vector.body
 ; ZVABD-NEXT:    # Parent Loop BB0_3 Depth=1
 ; ZVABD-NEXT:    # => This Inner Loop Header: Depth=2
-; ZVABD-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
-; ZVABD-NEXT:    vle8.v v14, (a5)
-; ZVABD-NEXT:    vle8.v v15, (t5)
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vle8.v v14, (t5)
+; ZVABD-NEXT:    vle8.v v15, (a5)
 ; ZVABD-NEXT:    sub t6, t6, t2
 ; ZVABD-NEXT:    add t5, t5, t2
-; ZVABD-NEXT:    vabdu.vv v14, v14, v15
-; ZVABD-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; ZVABD-NEXT:    vzext.vf2 v15, v14
-; ZVABD-NEXT:    vwaddu.wv v10, v10, v15
+; ZVABD-NEXT:    vzext.vf2 v16, v14
+; ZVABD-NEXT:    vzext.vf2 v14, v15
+; ZVABD-NEXT:    vwabdau.vv v10, v14, v16
 ; ZVABD-NEXT:    add a5, a5, t2
 ; ZVABD-NEXT:    bnez t6, .LBB0_7
 ; ZVABD-NEXT:  # %bb.8: # %middle.block

>From 69a43a86f37e5f0e4c60239ff529ca5d777b0e01 Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Tue, 11 Nov 2025 17:19:39 +0800
Subject: [PATCH 09/10] [RISCV] Change Commutable to 0 for Zvabd

---
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 46261d83711cc..3536a79a97ef9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -46,7 +46,8 @@ multiclass PseudoVABS {
 multiclass VPseudoVWABD_VV {
   foreach m = MxListW in {
     defvar mx = m.MX;
-    defm "" : VPseudoTernaryW_VV<m, Commutable = 1>,
+    // TODO: These instructions are commutable; we should support that.
+    defm "" : VPseudoTernaryW_VV<m, Commutable = 0>,
               SchedTernary<"WriteVIWMulAddV", "ReadVIWMulAddV",
                            "ReadVIWMulAddV", "ReadVIWMulAddV", mx>;
   }

>From dd0ce241da56232f86985129c9843063c50a7c16 Mon Sep 17 00:00:00 2001
From: Zhenxuan Sang <sang at bytedance.com>
Date: Tue, 3 Feb 2026 10:47:40 +0000
Subject: [PATCH 10/10] [Clang][RISCV] Add Zvabd intrinsics

---
 clang/include/clang/Basic/riscv_vector.td     |  18 +
 .../zvabd/non-policy/non-overloaded/vabd_vv.c | 139 ++++++
 .../non-policy/non-overloaded/vabdu_vv.c      | 139 ++++++
 .../zvabd/non-policy/non-overloaded/vabs_v.c  | 229 ++++++++++
 .../non-policy/non-overloaded/vwabdacc_vv.c   | 119 ++++++
 .../non-policy/non-overloaded/vwabdaccu_vv.c  | 119 ++++++
 .../zvabd/non-policy/overloaded/vabd_vv.c     | 139 ++++++
 .../zvabd/non-policy/overloaded/vabdu_vv.c    | 139 ++++++
 .../zvabd/non-policy/overloaded/vabs_v.c      | 229 ++++++++++
 .../zvabd/non-policy/overloaded/vwabdacc_vv.c | 119 ++++++
 .../non-policy/overloaded/vwabdaccu_vv.c      | 119 ++++++
 .../zvabd/policy/non-overloaded/vabd_vv.c     | 139 ++++++
 .../zvabd/policy/non-overloaded/vabdu_vv.c    | 139 ++++++
 .../zvabd/policy/non-overloaded/vabs_v.c      | 229 ++++++++++
 .../zvabd/policy/non-overloaded/vwabdacc_vv.c | 119 ++++++
 .../policy/non-overloaded/vwabdaccu_vv.c      | 119 ++++++
 .../zvabd/policy/overloaded/vabd_vv.c         | 139 ++++++
 .../zvabd/policy/overloaded/vabdu_vv.c        | 139 ++++++
 .../zvabd/policy/overloaded/vabs_v.c          | 229 ++++++++++
 .../zvabd/policy/overloaded/vwabdacc_vv.c     | 119 ++++++
 .../zvabd/policy/overloaded/vwabdaccu_vv.c    | 119 ++++++
 llvm/include/llvm/IR/IntrinsicsRISCV.td       |  12 +
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td  |   6 +
 llvm/test/CodeGen/RISCV/rvv/vabd.ll           | 238 +++++++++++
 llvm/test/CodeGen/RISCV/rvv/vabdu.ll          | 238 +++++++++++
 llvm/test/CodeGen/RISCV/rvv/vabs.ll           | 400 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll       | 202 +++++++++
 llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll      | 202 +++++++++
 28 files changed, 4296 insertions(+)
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vabd.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vabdu.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vabs.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll

diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index c899dc70fc0b7..e25ecfe2c2d27 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2043,6 +2043,24 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
     defm vclmul  : RVVInt64BinBuiltinSet;
     defm vclmulh : RVVInt64BinBuiltinSet;
   }
+
+  // zvabd
+  let RequiredFeatures = ["zvabd"] in {
+    defm vabs : RVVOutBuiltinSet<"vabs", "csil", [["v", "v", "vv"]]>;
+    defm vabd : RVVOutOp1BuiltinSet<"vabd", "cs", [["vv", "v", "vvv"]]>;
+    defm vabdu : RVVOutOp1BuiltinSet<"vabdu", "cs", [["vv", "Uv", "UvUvUv"]]>;
+  }
+}
+
+let UnMaskedPolicyScheme = HasPolicyOperand in {
+  let RequiredFeatures = ["zvabd"] in {
+    defm vwabdacc : RVVBuiltinSet<"vwabdacc", "cs",
+                                  [["vv", "w", "wwvv"]],
+                                  [-1, 1, 2]>;
+    defm vwabdaccu : RVVBuiltinSet<"vwabdaccu", "cs",
+                                   [["vv", "Uw", "UwUwUvUv"]],
+                                   [-1, 1, 2]>;
+  }
 }
 
 let UnMaskedPolicyScheme = HasPolicyOperand, HasMasked = false in {
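
As a usage sketch, the new builtins map to intrinsics like the following (the
helper name and the u8m1/u16m2 LMUL pairing are arbitrary choices for this
example; the intrinsic signatures follow the autogenerated tests below):

  #include <riscv_vector.h>

  // One strip-mined step of an unsigned SAD kernel, accumulating into u16 lanes.
  vuint16m2_t sad_step(vuint16m2_t acc, vuint8m1_t a, vuint8m1_t b, size_t vl) {
    // Two-instruction form: absolute difference, then widening accumulate.
    vuint8m1_t d = __riscv_vabdu_vv_u8m1(a, b, vl);
    acc = __riscv_vwaddu_wv_u16m2(acc, d, vl);
    // The accumulating form added here does the same step in one call:
    //   acc = __riscv_vwabdaccu_vv_u16m2(acc, a, b, vl);
    return acc;
  }
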
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..b97b39057306a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16m8(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..838c4d98d63e3
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m8(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
new file mode 100644
index 0000000000000..67751cb294739
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8(vint8mf8_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8mf8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4(vint8mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2(vint8mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1(vint8m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2(vint8m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4(vint8m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8(vint8m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4(vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2(vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1(vint16m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2(vint16m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4(vint16m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8(vint16m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i32mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1(vint32m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_i32m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2(vint32m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i32m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4(vint32m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i32m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8(vint32m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_i32m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1(vint64m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_i64m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2(vint64m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i64m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4(vint64m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i64m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8(vint64m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_i64m8(vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..a921f0868fd52
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16mf4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16m8(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i32mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i32m8(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..28ef213e00c1b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/non-overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16mf4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16m8(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u32mf2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u32m8(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..39d4e33fcd907
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..850750a838b67
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vabdu(vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
new file mode 100644
index 0000000000000..875828058741a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8(vint8mf8_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4(vint8mf4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2(vint8mf2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1(vint8m1_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2(vint8m2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4(vint8m4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8(vint8m8_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4(vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2(vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1(vint16m1_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2(vint16m2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4(vint16m4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8(vint16m8_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2(vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1(vint32m1_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2(vint32m2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4(vint32m4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8(vint32m8_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1(vint64m1_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2(vint64m2_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4(vint64m4_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8(vint64m8_t vs2, size_t vl) {
+  return __riscv_vabs(vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..b5623160d751a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwabdacc(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..6726fafde1451
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/non-policy/overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..404ea228d4ede
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_vv_i16m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..a2c7c22e86a93
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vabdu_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
new file mode 100644
index 0000000000000..cd44b41a06904
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_i8m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_i16m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i32mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_i32m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i32m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i32m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_i32m8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vabs_v_i64m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vabs_v_i64m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vabs_v_i64m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vabs_v_i64m8_tu(vd, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..024e6b07fe26f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_vv_i32m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..bc6ac486ff71e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/non-overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
new file mode 100644
index 0000000000000..0ac70e40ca8ec
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabd_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabd_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabd_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabd_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabd_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabd_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabd_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabd_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabd_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabd_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabd_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabd_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabd_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabd_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vabd_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
new file mode 100644
index 0000000000000..0b3415e65e498
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabdu_vv.c
@@ -0,0 +1,139 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabdu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabdu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vabdu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabdu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabdu.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vabdu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabdu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabdu.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vabdu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabdu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabdu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vabdu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabdu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabdu.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vabdu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabdu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabdu.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vabdu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabdu_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabdu.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vabdu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabdu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabdu.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vabdu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabdu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabdu.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vabdu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabdu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabdu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vabdu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabdu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabdu.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vabdu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabdu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabdu.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vabdu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabdu_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabdu.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vabdu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vabdu_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
new file mode 100644
index 0000000000000..d992f1a41089e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vabs_v.c
@@ -0,0 +1,229 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vabs_v_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vabs.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vabs_v_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vabs_v_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vabs.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vabs_v_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vabs_v_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vabs.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vabs_v_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vabs_v_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vabs.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vabs_v_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vabs_v_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vabs.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vabs_v_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vabs_v_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vabs.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vabs_v_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vabs_v_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vabs.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vabs_v_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vabs_v_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vabs.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vabs_v_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vabs_v_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vabs.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vabs_v_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vabs_v_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vabs.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vabs_v_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vabs_v_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vabs.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vabs_v_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vabs_v_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vabs.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vabs_v_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vabs_v_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vabs.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vabs_v_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vabs_v_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vabs.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vabs_v_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vabs_v_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vabs.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vabs_v_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vabs_v_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vabs.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vabs_v_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vabs_v_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vabs.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vabs_v_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vabs_v_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vabs.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vabs_v_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vabs_v_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vabs.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vabs_v_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vabs_v_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vabs.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vabs_v_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vabs_v_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vabs.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vabs_v_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vabs_v_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vabs.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vabs_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vl) {
+  return __riscv_vabs_tu(vd, vs2, vl);
+}
+
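Note (not part of the patch): a minimal sketch of how the tail-undisturbed vabs overload exercised in the file above might be used from C. It relies only on the `__riscv_vabs_tu` signature shown in these tests plus the standard RVV vsetvl/load/store intrinsics; the helper name and loop structure are illustrative assumptions.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative only: elementwise |x| over an int8_t buffer using the
// tail-undisturbed overload checked in vabs_v.c above.
void abs_i8(int8_t *dst, const int8_t *src, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);
    vint8m1_t v = __riscv_vle8_v_i8m1(src + i, vl);
    // Pass the loaded vector as both vd and vs2, so tail elements past vl
    // simply keep the loaded values (tail-undisturbed policy).
    v = __riscv_vabs_tu(v, v, vl);
    __riscv_vse8_v_i8m1(dst + i, v, vl);
    i += vl;
  }
}
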
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
new file mode 100644
index 0000000000000..eb403a9d33a6e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdacc_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdacc_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vwabdacc_vv_i8mf8_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdacc_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vwabdacc_vv_i8mf4_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdacc_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vwabdacc_vv_i8mf2_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdacc_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vwabdacc_vv_i8m1_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdacc_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vwabdacc_vv_i8m2_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdacc_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vwabdacc_vv_i8m4_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdacc_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vwabdacc_vv_i16mf4_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdacc_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vwabdacc_vv_i16mf2_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdacc_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vwabdacc_vv_i16m1_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdacc_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vwabdacc_vv_i16m2_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdacc_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vwabdacc_vv_i16m4_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vwabdacc_tu(vd, vs2, vs1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
new file mode 100644
index 0000000000000..ea4f25ec0e121
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvabd/policy/overloaded/vwabdaccu_vv.c
@@ -0,0 +1,119 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +experimental-zvabd \
+// RUN:   -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vwabdaccu_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vwabdaccu_vv_u8mf8_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vwabdaccu_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vwabdaccu_vv_u8mf4_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vwabdaccu_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vwabdaccu_vv_u8mf2_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vwabdaccu_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vwabdaccu_vv_u8m1_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vwabdaccu_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vwabdaccu_vv_u8m2_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vwabdaccu_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vwabdaccu_vv_u8m4_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vwabdaccu_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vwabdaccu_vv_u16mf4_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vwabdaccu_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vwabdaccu_vv_u16mf2_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vwabdaccu_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vwabdaccu_vv_u16m1_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vwabdaccu_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vwabdaccu_vv_u16m2_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vwabdaccu_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vwabdaccu_vv_u16m4_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vwabdaccu_tu(vd, vs2, vs1, vl);
+}
+
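Note (again illustrative, not from the patch): the widening absolute-difference-and-accumulate overload tested above lends itself to a SAD-style kernel. The sketch below uses only the `__riscv_vwabdaccu_tu` signature that appears in vwabdaccu_vv.c plus standard RVV vsetvl/load intrinsics; the function name and the choice of LMUL are assumptions.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Illustrative only: accumulate unsigned absolute differences of two u8
// buffers into a widened u16 accumulator with the _tu overload above.
vuint16m2_t sad_acc_u8(vuint16m2_t acc, const uint8_t *a, const uint8_t *b,
                       size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);
    vuint8m1_t va = __riscv_vle8_v_u8m1(a + i, vl);
    vuint8m1_t vb = __riscv_vle8_v_u8m1(b + i, vl);
    // acc[j] += |va[j] - vb[j]| for j < vl; tail elements stay undisturbed.
    acc = __riscv_vwabdaccu_tu(acc, va, vb, vl);
    i += vl;
  }
  return acc;
}
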
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index f194ce99b52d1..d8f1d0a88c897 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1907,6 +1907,18 @@ let TargetPrefix = "riscv" in {
   def int_riscv_vsm3me   : RISCVBinaryAAXUnMasked;
 } // TargetPrefix = "riscv"
 
+//===----------------------------------------------------------------------===//
+// Zvabd - Vector Absolute Difference
+//===----------------------------------------------------------------------===//
+let TargetPrefix = "riscv" in {
+
+  defm vabs      : RISCVUnaryAA;
+  defm vabd      : RISCVBinaryAAX;
+  defm vabdu     : RISCVBinaryAAX;
+  defm vwabdacc  : RISCVTernaryWide;
+  defm vwabdaccu : RISCVTernaryWide;
+} // TargetPrefix = "riscv"
+
 //===----------------------------------------------------------------------===//
 // Zvqdotq - Vector quad widening 4D Dot Product
 //
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 3536a79a97ef9..7ccb56b7e5811 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -89,6 +89,12 @@ defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU", ABDIntVectors>;
 defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD", ABDIntVectors>;
 defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU", ABDIntVectors>;
 
+defm : VPatUnaryV_V<"int_riscv_vabs", "PseudoVABS", AllIntegerVectors>;
+defm : VPatBinaryV_VV<"int_riscv_vabd", "PseudoVABD", ABDIntVectors>;
+defm : VPatBinaryV_VV<"int_riscv_vabdu", "PseudoVABDU", ABDIntVectors>;
+defm : VPatTernaryW_VV<"int_riscv_vwabdacc", "PseudoVWABDA", ABDAIntVectors>;
+defm : VPatTernaryW_VV<"int_riscv_vwabdaccu", "PseudoVWABDAU", ABDAIntVectors>;
+
 foreach vti = AllIntegerVectors in {
   def : Pat<(vti.Vector (abs (vti.Vector vti.RegClass:$rs2))),
             (!cast<Instruction>("PseudoVABS_V_"#vti.LMul.MX)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabd.ll b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
new file mode 100644
index 0000000000000..24f242c64aab6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i8> @vabd_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8mf8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i8> @llvm.riscv.vabd.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+  ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vabd.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @vabd_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i8> @llvm.riscv.vabd.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+  ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vabd.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @vabd_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i8> @llvm.riscv.vabd.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+  ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vabd.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @vabd_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i8> @llvm.riscv.vabd.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+  ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vabd.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @vabd_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.riscv.vabd.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+  ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vabd.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @vabd_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v12
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v12
+; RV64-NEXT:    ret
+  %res = call <vscale x 32 x i8> @llvm.riscv.vabd.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+  ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vabd.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @vabd_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
+; RV32-LABEL: vabd_vv_i8m8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i8m8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v16
+; RV64-NEXT:    ret
+  %res = call <vscale x 64 x i8> @llvm.riscv.vabd.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+  ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vabd.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @vabd_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vabd.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+  ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vabd.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @vabd_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vabd.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+  ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vabd.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @vabd_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vabd.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+  ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vabd.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @vabd_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vabd.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+  ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vabd.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @vabd_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v12
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v12
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vabd.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
+  ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vabd.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @vabd_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
+; RV32-LABEL: vabd_vv_i16m8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; RV32-NEXT:    vabd.vv v8, v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabd_vv_i16m8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; RV64-NEXT:    vabd.vv v8, v8, v16
+; RV64-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vabd.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
+  ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vabd.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabdu.ll b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
new file mode 100644
index 0000000000000..da961efefe7f9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i8> @vabdu_vv_i8mf8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8mf8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i8mf8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i8> @llvm.riscv.vabdu.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1)
+  ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vabdu.vscalex1xi8.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @vabdu_vv_i8mf4(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i8mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i8> @llvm.riscv.vabdu.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1)
+  ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vabdu.vscalex2xi8.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @vabdu_vv_i8mf2(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i8mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i8> @llvm.riscv.vabdu.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1)
+  ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vabdu.vscalex4xi8.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @vabdu_vv_i8m1(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i8m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i8> @llvm.riscv.vabdu.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1)
+  ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vabdu.vscalex8xi8.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @vabdu_vv_i8m2(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i8m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.riscv.vabdu.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1)
+  ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vabdu.vscalex16xi8.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @vabdu_vv_i8m4(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v12
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i8m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v12
+; RV64-NEXT:    ret
+  %res = call <vscale x 32 x i8> @llvm.riscv.vabdu.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1)
+  ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vabdu.vscalex32xi8.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @vabdu_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
+; RV32-LABEL: vabdu_vv_i8m8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i8m8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v16
+; RV64-NEXT:    ret
+  %res = call <vscale x 64 x i8> @llvm.riscv.vabdu.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, iXLen -1)
+  ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vabdu.vscalex64xi8.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @vabdu_vv_i16mf4(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i16mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vabdu.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1)
+  ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vabdu.vscalex1xi16.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @vabdu_vv_i16mf2(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i16mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vabdu.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1)
+  ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vabdu.vscalex2xi16.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @vabdu_vv_i16m1(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i16m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v9
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vabdu.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1)
+  ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vabdu.vscalex4xi16.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @vabdu_vv_i16m2(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i16m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vabdu.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1)
+  ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vabdu.vscalex8xi16.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @vabdu_vv_i16m4(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v12
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i16m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v12
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vabdu.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1)
+  ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vabdu.vscalex16xi16.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @vabdu_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
+; RV32-LABEL: vabdu_vv_i16m8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; RV32-NEXT:    vabdu.vv v8, v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabdu_vv_i16m8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; RV64-NEXT:    vabdu.vv v8, v8, v16
+; RV64-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vabdu.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, iXLen -1)
+  ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vabdu.vscalex32xi16.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vabs.ll b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
new file mode 100644
index 0000000000000..7be228baa37ee
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vabs.ll
@@ -0,0 +1,400 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i8> @vabs_v_i8mf8(<vscale x 1 x i8> %a) {
+; RV32-LABEL: vabs_v_i8mf8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i8mf8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i8> @llvm.riscv.vabs.vscalex1xi8(<vscale x 1 x i8> poison, <vscale x 1 x i8> %a, iXLen -1)
+  ret <vscale x 1 x i8> %res
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vabs.vscalex1xi8(<vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
+
+define <vscale x 2 x i8> @vabs_v_i8mf4(<vscale x 2 x i8> %a) {
+; RV32-LABEL: vabs_v_i8mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i8mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i8> @llvm.riscv.vabs.vscalex2xi8(<vscale x 2 x i8> poison, <vscale x 2 x i8> %a, iXLen -1)
+  ret <vscale x 2 x i8> %res
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vabs.vscalex2xi8(<vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
+
+define <vscale x 4 x i8> @vabs_v_i8mf2(<vscale x 4 x i8> %a) {
+; RV32-LABEL: vabs_v_i8mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i8mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i8> @llvm.riscv.vabs.vscalex4xi8(<vscale x 4 x i8> poison, <vscale x 4 x i8> %a, iXLen -1)
+  ret <vscale x 4 x i8> %res
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vabs.vscalex4xi8(<vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
+
+define <vscale x 8 x i8> @vabs_v_i8m1(<vscale x 8 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i8m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i8> @llvm.riscv.vabs.vscalex8xi8(<vscale x 8 x i8> poison, <vscale x 8 x i8> %a, iXLen -1)
+  ret <vscale x 8 x i8> %res
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vabs.vscalex8xi8(<vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
+
+define <vscale x 16 x i8> @vabs_v_i8m2(<vscale x 16 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i8m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.riscv.vabs.vscalex16xi8(<vscale x 16 x i8> poison, <vscale x 16 x i8> %a, iXLen -1)
+  ret <vscale x 16 x i8> %res
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vabs.vscalex16xi8(<vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
+
+define <vscale x 32 x i8> @vabs_v_i8m4(<vscale x 32 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i8m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 32 x i8> @llvm.riscv.vabs.vscalex32xi8(<vscale x 32 x i8> poison, <vscale x 32 x i8> %a, iXLen -1)
+  ret <vscale x 32 x i8> %res
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vabs.vscalex32xi8(<vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
+
+define <vscale x 64 x i8> @vabs_v_i8m8(<vscale x 64 x i8> %a) {
+; RV32-LABEL: vabs_v_i8m8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i8m8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 64 x i8> @llvm.riscv.vabs.vscalex64xi8(<vscale x 64 x i8> poison, <vscale x 64 x i8> %a, iXLen -1)
+  ret <vscale x 64 x i8> %res
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vabs.vscalex64xi8(<vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
+
+define <vscale x 1 x i16> @vabs_v_i16mf4(<vscale x 1 x i16> %a) {
+; RV32-LABEL: vabs_v_i16mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i16mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vabs.vscalex1xi16(<vscale x 1 x i16> poison, <vscale x 1 x i16> %a, iXLen -1)
+  ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vabs.vscalex1xi16(<vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
+
+define <vscale x 2 x i16> @vabs_v_i16mf2(<vscale x 2 x i16> %a) {
+; RV32-LABEL: vabs_v_i16mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i16mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vabs.vscalex2xi16(<vscale x 2 x i16> poison, <vscale x 2 x i16> %a, iXLen -1)
+  ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vabs.vscalex2xi16(<vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
+
+define <vscale x 4 x i16> @vabs_v_i16m1(<vscale x 4 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i16m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vabs.vscalex4xi16(<vscale x 4 x i16> poison, <vscale x 4 x i16> %a, iXLen -1)
+  ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vabs.vscalex4xi16(<vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
+
+define <vscale x 8 x i16> @vabs_v_i16m2(<vscale x 8 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i16m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vabs.vscalex8xi16(<vscale x 8 x i16> poison, <vscale x 8 x i16> %a, iXLen -1)
+  ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vabs.vscalex8xi16(<vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
+
+define <vscale x 16 x i16> @vabs_v_i16m4(<vscale x 16 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i16m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vabs.vscalex16xi16(<vscale x 16 x i16> poison, <vscale x 16 x i16> %a, iXLen -1)
+  ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vabs.vscalex16xi16(<vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
+
+define <vscale x 32 x i16> @vabs_v_i16m8(<vscale x 32 x i16> %a) {
+; RV32-LABEL: vabs_v_i16m8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i16m8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vabs.vscalex32xi16(<vscale x 32 x i16> poison, <vscale x 32 x i16> %a, iXLen -1)
+  ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vabs.vscalex32xi16(<vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
+
+define <vscale x 1 x i32> @vabs_v_i32mf2(<vscale x 1 x i32> %a) {
+; RV32-LABEL: vabs_v_i32mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i32mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i32> @llvm.riscv.vabs.vscalex1xi32(<vscale x 1 x i32> poison, <vscale x 1 x i32> %a, iXLen -1)
+  ret <vscale x 1 x i32> %res
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vabs.vscalex1xi32(<vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
+
+define <vscale x 2 x i32> @vabs_v_i32m1(<vscale x 2 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i32m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i32> @llvm.riscv.vabs.vscalex2xi32(<vscale x 2 x i32> poison, <vscale x 2 x i32> %a, iXLen -1)
+  ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vabs.vscalex2xi32(<vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
+
+define <vscale x 4 x i32> @vabs_v_i32m2(<vscale x 4 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i32m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.riscv.vabs.vscalex4xi32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, iXLen -1)
+  ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vabs.vscalex4xi32(<vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
+
+define <vscale x 8 x i32> @vabs_v_i32m4(<vscale x 8 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i32m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i32> @llvm.riscv.vabs.vscalex8xi32(<vscale x 8 x i32> poison, <vscale x 8 x i32> %a, iXLen -1)
+  ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vabs.vscalex8xi32(<vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
+
+define <vscale x 16 x i32> @vabs_v_i32m8(<vscale x 16 x i32> %a) {
+; RV32-LABEL: vabs_v_i32m8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i32m8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i32> @llvm.riscv.vabs.vscalex16xi32(<vscale x 16 x i32> poison, <vscale x 16 x i32> %a, iXLen -1)
+  ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vabs.vscalex16xi32(<vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
+
+define <vscale x 1 x i64> @vabs_v_i64m1(<vscale x 1 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i64m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i64> @llvm.riscv.vabs.vscalex1xi64(<vscale x 1 x i64> poison, <vscale x 1 x i64> %a, iXLen -1)
+  ret <vscale x 1 x i64> %res
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vabs.vscalex1xi64(<vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
+
+define <vscale x 2 x i64> @vabs_v_i64m2(<vscale x 2 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i64m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i64> @llvm.riscv.vabs.vscalex2xi64(<vscale x 2 x i64> poison, <vscale x 2 x i64> %a, iXLen -1)
+  ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vabs.vscalex2xi64(<vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
+
+define <vscale x 4 x i64> @vabs_v_i64m4(<vscale x 4 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i64m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i64> @llvm.riscv.vabs.vscalex4xi64(<vscale x 4 x i64> poison, <vscale x 4 x i64> %a, iXLen -1)
+  ret <vscale x 4 x i64> %res
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vabs.vscalex4xi64(<vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
+
+define <vscale x 8 x i64> @vabs_v_i64m8(<vscale x 8 x i64> %a) {
+; RV32-LABEL: vabs_v_i64m8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32-NEXT:    vabs.v v8, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vabs_v_i64m8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64-NEXT:    vabs.v v8, v8
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i64> @llvm.riscv.vabs.vscalex8xi64(<vscale x 8 x i64> poison, <vscale x 8 x i64> %a, iXLen -1)
+  ret <vscale x 8 x i64> %res
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vabs.vscalex8xi64(<vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
new file mode 100644
index 0000000000000..f09babbddae0e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabdacc.ll
@@ -0,0 +1,202 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i16> @vwabdacc_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8mf8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf8, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i8mf8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf8, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v9, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vwabdacc.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwabdacc.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @vwabdacc_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf4, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i8mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf4, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v9, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vwabdacc.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwabdacc.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @vwabdacc_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf2, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i8mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf2, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v9, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vwabdacc.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwabdacc.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @vwabdacc_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m1, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v10, v11
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i8m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m1, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v10, v11
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vwabdacc.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwabdacc.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @vwabdacc_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m2, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v12, v14
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i8m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m2, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v12, v14
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vwabdacc.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwabdacc.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @vwabdacc_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vwabdacc_vv_i8m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m4, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v16, v20
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i8m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m4, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v16, v20
+; RV64-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vwabdacc.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwabdacc.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @vwabdacc_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i16mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v9, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i32> @llvm.riscv.vwabdacc.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
+  ret <vscale x 1 x i32> %res
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwabdacc.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @vwabdacc_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i16mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v9, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i32> @llvm.riscv.vwabdacc.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
+  ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwabdacc.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @vwabdacc_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v10, v11
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i16m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v10, v11
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.riscv.vwabdacc.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
+  ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwabdacc.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @vwabdacc_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v12, v14
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i16m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v12, v14
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i32> @llvm.riscv.vwabdacc.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
+  ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwabdacc.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @vwabdacc_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vwabdacc_vv_i16m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
+; RV32-NEXT:    vwabda.vv v8, v16, v20
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdacc_vv_i16m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
+; RV64-NEXT:    vwabda.vv v8, v16, v20
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i32> @llvm.riscv.vwabdacc.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
+  ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwabdacc.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll b/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
new file mode 100644
index 0000000000000..54c5e0a1b32a6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwabdaccu.ll
@@ -0,0 +1,202 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs | FileCheck %s --check-prefix=RV64
+
+define <vscale x 1 x i16> @vwabdaccu_vv_i8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8mf8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf8, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8mf8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf8, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v9, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i16> @llvm.riscv.vwabdaccu.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 1 x i16> %res
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwabdaccu.vscalex1xi16.vscalex1xi8.vscalex1xi8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
+
+define <vscale x 2 x i16> @vwabdaccu_vv_i8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf4, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf4, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v9, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i16> @llvm.riscv.vwabdaccu.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %a, <vscale x 2 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 2 x i16> %res
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwabdaccu.vscalex2xi16.vscalex2xi8.vscalex2xi8(<vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
+
+define <vscale x 4 x i16> @vwabdaccu_vv_i8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, mf2, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, mf2, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v9, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i16> @llvm.riscv.vwabdaccu.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %a, <vscale x 4 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 4 x i16> %res
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwabdaccu.vscalex4xi16.vscalex4xi8.vscalex4xi8(<vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
+
+define <vscale x 8 x i16> @vwabdaccu_vv_i8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m1, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v10, v11
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m1, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v10, v11
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.riscv.vwabdaccu.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %a, <vscale x 8 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 8 x i16> %res
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwabdaccu.vscalex8xi16.vscalex8xi8.vscalex8xi8(<vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
+
+define <vscale x 16 x i16> @vwabdaccu_vv_i8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m2, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v12, v14
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m2, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v12, v14
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.riscv.vwabdaccu.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 16 x i16> %res
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwabdaccu.vscalex16xi16.vscalex16xi8.vscalex16xi8(<vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
+
+define <vscale x 32 x i16> @vwabdaccu_vv_i8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b) {
+; RV32-LABEL: vwabdaccu_vv_i8m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e8, m4, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v16, v20
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i8m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e8, m4, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v16, v20
+; RV64-NEXT:    ret
+  %res = call <vscale x 32 x i16> @llvm.riscv.vwabdaccu.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %a, <vscale x 32 x i8> %b, iXLen -1, iXLen 0)
+  ret <vscale x 32 x i16> %res
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwabdaccu.vscalex32xi16.vscalex32xi8.vscalex32xi8(<vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
+
+define <vscale x 1 x i32> @vwabdaccu_vv_i16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16mf4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16mf4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf4, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v9, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 1 x i32> @llvm.riscv.vwabdaccu.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %a, <vscale x 1 x i16> %b, iXLen -1, iXLen 0)
+  ret <vscale x 1 x i32> %res
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwabdaccu.vscalex1xi32.vscalex1xi16.vscalex1xi16(<vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
+
+define <vscale x 2 x i32> @vwabdaccu_vv_i16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16mf2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v9, v10
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16mf2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, mf2, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v9, v10
+; RV64-NEXT:    ret
+  %res = call <vscale x 2 x i32> @llvm.riscv.vwabdaccu.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %a, <vscale x 2 x i16> %b, iXLen -1, iXLen 0)
+  ret <vscale x 2 x i32> %res
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwabdaccu.vscalex2xi32.vscalex2xi16.vscalex2xi16(<vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
+
+define <vscale x 4 x i32> @vwabdaccu_vv_i16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16m1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v10, v11
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16m1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v10, v11
+; RV64-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.riscv.vwabdaccu.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %a, <vscale x 4 x i16> %b, iXLen -1, iXLen 0)
+  ret <vscale x 4 x i32> %res
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwabdaccu.vscalex4xi32.vscalex4xi16.vscalex4xi16(<vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
+
+define <vscale x 8 x i32> @vwabdaccu_vv_i16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16m2:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v12, v14
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16m2:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m2, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v12, v14
+; RV64-NEXT:    ret
+  %res = call <vscale x 8 x i32> @llvm.riscv.vwabdaccu.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, iXLen -1, iXLen 0)
+  ret <vscale x 8 x i32> %res
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwabdaccu.vscalex8xi32.vscalex8xi16.vscalex8xi16(<vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
+
+define <vscale x 16 x i32> @vwabdaccu_vv_i16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b) {
+; RV32-LABEL: vwabdaccu_vv_i16m4:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
+; RV32-NEXT:    vwabdau.vv v8, v16, v20
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwabdaccu_vv_i16m4:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e16, m4, tu, ma
+; RV64-NEXT:    vwabdau.vv v8, v16, v20
+; RV64-NEXT:    ret
+  %res = call <vscale x 16 x i32> @llvm.riscv.vwabdaccu.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %a, <vscale x 16 x i16> %b, iXLen -1, iXLen 0)
+  ret <vscale x 16 x i32> %res
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwabdaccu.vscalex16xi32.vscalex16xi16.vscalex16xi16(<vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
+


