[clang] [llvm] [RISCV] Support for Zvabd fast-track proposal (PR #124239)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 16 04:05:04 PDT 2025


https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/124239

>From 1b1af0a4711a9ceaf1932139ecd7a402d3a4be29 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Tue, 16 Jul 2024 16:08:16 +0800
Subject: [PATCH 1/4] [RISCV][MC] Support Zvabd instructions

Support for these instructions is added:

- Vector Single-Width Signed/Unsigned Integer Absolute Difference
- Vector Widening Signed/Unsigned Integer Absolute Difference and
  Accumulate

Doc: https://riscv.atlassian.net/wiki/spaces/VXXX/pages/166690866/Fast-Track+Proposal+for+integer+vector+absolute+difference+instructions
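
As a quick illustration (a minimal sketch using the mnemonics and -mattr flags
from the MC tests below), the new instructions can be assembled once the
experimental extension is enabled:

    # Assemble with:
    #   llvm-mc -triple=riscv64 -mattr=+v,+experimental-zvabd -show-encoding example.s
    vabd.vv      v10, v9, v8          # signed absolute difference
    vabdu.vv     v10, v9, v8          # unsigned absolute difference
    vwabdacc.vv  v10, v9, v8          # widening signed abs-diff and accumulate
    vwabdaccu.vv v10, v9, v8, v0.t    # widening unsigned variant, masked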
---
 .../Driver/print-supported-extensions-riscv.c |  1 +
 .../test/Preprocessor/riscv-target-features.c |  9 +++
 llvm/lib/Target/RISCV/RISCVFeatures.td        |  6 ++
 llvm/lib/Target/RISCV/RISCVInstrInfo.td       |  1 +
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td  | 27 ++++++++
 llvm/test/CodeGen/RISCV/attributes.ll         |  4 ++
 llvm/test/CodeGen/RISCV/features-info.ll      |  1 +
 llvm/test/MC/RISCV/rvv/zvabd-invalid.s        | 10 +++
 llvm/test/MC/RISCV/rvv/zvabd.s                | 63 +++++++++++++++++++
 .../TargetParser/RISCVISAInfoTest.cpp         |  1 +
 10 files changed, 123 insertions(+)
 create mode 100644 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
 create mode 100644 llvm/test/MC/RISCV/rvv/zvabd-invalid.s
 create mode 100644 llvm/test/MC/RISCV/rvv/zvabd.s

diff --git a/clang/test/Driver/print-supported-extensions-riscv.c b/clang/test/Driver/print-supported-extensions-riscv.c
index 3fa5ef9afd143..30f74469b3218 100644
--- a/clang/test/Driver/print-supported-extensions-riscv.c
+++ b/clang/test/Driver/print-supported-extensions-riscv.c
@@ -214,6 +214,7 @@
 // CHECK-NEXT:     zicfilp              1.0       'Zicfilp' (Landing pad)
 // CHECK-NEXT:     zicfiss              1.0       'Zicfiss' (Shadow stack)
 // CHECK-NEXT:     zalasr               0.1       'Zalasr' (Load-Acquire and Store-Release Instructions)
+// CHECK-NEXT:     zvabd                0.2       'Zvabd' (Vector Absolute Difference)
 // CHECK-NEXT:     zvbc32e              0.7       'Zvbc32e' (Vector Carryless Multiplication with 32-bits elements)
 // CHECK-NEXT:     zvfbfa               0.1       'Zvfbfa' (Additional BF16 vector compute support)
 // CHECK-NEXT:     zvkgs                0.7       'Zvkgs' (Vector-Scalar GCM instructions for Cryptography)
diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index 204c9851e680c..0330054b70260 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -125,6 +125,7 @@
 // CHECK-NOT: __riscv_zksh {{.*$}}
 // CHECK-NOT: __riscv_zkt {{.*$}}
 // CHECK-NOT: __riscv_zmmul {{.*$}}
+// CHECK-NOT: __riscv_zvabd {{.*$}}
 // CHECK-NOT: __riscv_zvbb {{.*$}}
 // CHECK-NOT: __riscv_zvbc {{.*$}}
 // CHECK-NOT: __riscv_zve32f {{.*$}}
@@ -1339,6 +1340,14 @@
 // RUN:   -o - | FileCheck --check-prefix=CHECK-ZFA-EXT %s
 // CHECK-ZFA-EXT: __riscv_zfa 1000000{{$}}
 
+// RUN: %clang --target=riscv32 -menable-experimental-extensions \
+// RUN:   -march=rv32i_zve64x_zvabd0p2 -E -dM %s \
+// RUN:   -o - | FileCheck --check-prefix=CHECK-ZVABD-EXT %s
+// RUN: %clang --target=riscv64 -menable-experimental-extensions \
+// RUN:   -march=rv64i_zve64x_zvabd0p2 -E -dM %s \
+// RUN:   -o - | FileCheck --check-prefix=CHECK-ZVABD-EXT %s
+// CHECK-ZVABD-EXT: __riscv_zvabd  2000{{$}}
+
 // RUN: %clang --target=riscv32 \
 // RUN:   -march=rv32i_zve64x_zvbb1p0 -E -dM %s \
 // RUN:   -o - | FileCheck --check-prefix=CHECK-ZVBB-EXT %s
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index fa8272b239d99..f5e933c1f82a4 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -680,6 +680,12 @@ def FeatureStdExtV
                      [FeatureStdExtZvl128b, FeatureStdExtZve64d]>,
       RISCVExtensionBitmask<0, 21>;
 
+def FeatureStdExtZvabd
+    : RISCVExperimentalExtension<0, 2, "Vector Absolute Difference">;
+def HasStdExtZvabd : Predicate<"Subtarget->hasStdExtZvabd()">,
+                     AssemblerPredicate<(all_of FeatureStdExtZvabd),
+                                        "'Zvabd' (Vector Absolute Difference)">;
+
 def FeatureStdExtZvfbfa
     : RISCVExperimentalExtension<0, 1, "Additional BF16 vector compute support",
                                  [FeatureStdExtZve32f, FeatureStdExtZfbfmin]>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 23f5a848137c4..5225c9646c3ae 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -2347,6 +2347,7 @@ include "RISCVInstrInfoZk.td"
 
 // Vector
 include "RISCVInstrInfoV.td"
+include "RISCVInstrInfoZvabd.td"
 include "RISCVInstrInfoZvk.td"
 include "RISCVInstrInfoZvqdotq.td"
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
new file mode 100644
index 0000000000000..16d8a45b0cd1e
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -0,0 +1,27 @@
+//===-- RISCVInstrInfoZvabd.td - 'Zvabd' instructions ------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file describes the RISC-V instructions for 'Zvabd' (Vector Absolute
+/// Difference).
+///
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction Definitions
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtZvabd] in {
+  defm VABS_V : VALU_MV_VS2<"vabs.v", 0b010010, 0b10000>;
+
+  def VABD_VV  : VALUVV<0b010001, OPMVV, "vabd.vv">;
+  def VABDU_VV : VALUVV<0b010011, OPMVV, "vabdu.vv">;
+
+  let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
+    def VWABDACC_VV  : VALUVV<0b010101, OPMVV, "vwabdacc.vv">;
+    def VWABDACCU_VV : VALUVV<0b010110, OPMVV, "vwabdaccu.vv">;
+  } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
+} // Predicates = [HasStdExtZvabd]
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index bbe7bec8673f4..c9461ed4de283 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -134,6 +134,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+zvkt %s -o - | FileCheck --check-prefix=RV32ZVKT %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+experimental-zvqdotq %s -o - | FileCheck --check-prefix=RV32ZVQDOTQ %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zvfh %s -o - | FileCheck --check-prefix=RV32ZVFH %s
+; RUN: llc -mtriple=riscv32 -mattr=+zve32x -mattr=+experimental-zvabd %s -o - | FileCheck --check-prefix=RV32ZVABD %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zicond %s -o - | FileCheck --check-prefix=RV32ZICOND %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zilsd %s -o - | FileCheck --check-prefix=RV32ZILSD %s
 ; RUN: llc -mtriple=riscv32 -mattr=+zimop %s -o - | FileCheck --check-prefix=RV32ZIMOP %s
@@ -289,6 +290,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+zvkt %s -o - | FileCheck --check-prefix=RV64ZVKT %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+experimental-zvqdotq %s -o - | FileCheck --check-prefix=RV64ZVQDOTQ %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zvfh %s -o - | FileCheck --check-prefix=RV64ZVFH %s
+; RUN: llc -mtriple=riscv64 -mattr=+zve32x -mattr=+experimental-zvabd %s -o - | FileCheck --check-prefix=RV64ZVABD %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zicond %s -o - | FileCheck --check-prefix=RV64ZICOND %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zimop %s -o - | FileCheck --check-prefix=RV64ZIMOP %s
 ; RUN: llc -mtriple=riscv64 -mattr=+zcmop %s -o - | FileCheck --check-prefix=RV64ZCMOP %s
@@ -474,6 +476,7 @@
 ; RV32ZVKT: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvkt1p0_zvl32b1p0"
 ; RV32ZVQDOTQ: .attribute 5, "rv32i2p1_zicsr2p0_zve32x1p0_zvl32b1p0_zvqdotq0p0"
 ; RV32ZVFH: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zfhmin1p0_zve32f1p0_zve32x1p0_zvfh1p0_zvfhmin1p0_zvl32b1p0"
+; RV32ZVABD: .attribute 5, "rv32i2p1_zicsr2p0_zvabd0p2_zve32x1p0_zvl32b1p0"
 ; RV32ZICOND: .attribute 5, "rv32i2p1_zicond1p0"
 ; RV32ZILSD: .attribute 5, "rv32i2p1_zilsd1p0"
 ; RV32ZIMOP: .attribute 5, "rv32i2p1_zimop1p0"
@@ -627,6 +630,7 @@
 ; RV64ZVKT: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvkt1p0_zvl32b1p0"
 ; RV64ZVQDOTQ: .attribute 5, "rv64i2p1_zicsr2p0_zve32x1p0_zvl32b1p0_zvqdotq0p0"
 ; RV64ZVFH: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zfhmin1p0_zve32f1p0_zve32x1p0_zvfh1p0_zvfhmin1p0_zvl32b1p0"
+; RV64ZVABD: .attribute 5, "rv64i2p1_zicsr2p0_zvabd0p2_zve32x1p0_zvl32b1p0"
 ; RV64ZICOND: .attribute 5, "rv64i2p1_zicond1p0"
 ; RV64ZIMOP: .attribute 5, "rv64i2p1_zimop1p0"
 ; RV64ZCMOP: .attribute 5, "rv64i2p1_zca1p0_zcmop1p0"
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index d13a5a4e2b9be..13778fe297f12 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -54,6 +54,7 @@
 ; CHECK-NEXT:   experimental-zalasr              - 'Zalasr' (Load-Acquire and Store-Release Instructions).
 ; CHECK-NEXT:   experimental-zicfilp             - 'Zicfilp' (Landing pad).
 ; CHECK-NEXT:   experimental-zicfiss             - 'Zicfiss' (Shadow stack).
+; CHECK-NEXT:   experimental-zvabd               - 'Zvabd' (Vector Absolute Difference).
 ; CHECK-NEXT:   experimental-zvbc32e             - 'Zvbc32e' (Vector Carryless Multiplication with 32-bits elements).
 ; CHECK-NEXT:   experimental-zvfbfa              - 'Zvfbfa' (Additional BF16 vector compute support).
 ; CHECK-NEXT:   experimental-zvkgs               - 'Zvkgs' (Vector-Scalar GCM instructions for Cryptography).
diff --git a/llvm/test/MC/RISCV/rvv/zvabd-invalid.s b/llvm/test/MC/RISCV/rvv/zvabd-invalid.s
new file mode 100644
index 0000000000000..3673a4c237ccd
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvabd-invalid.s
@@ -0,0 +1,10 @@
+# RUN: not llvm-mc -triple=riscv64 --mattr=+zve64x --mattr=+experimental-zvabd %s 2>&1 \
+# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
+
+vwabdacc.vv v9, v9, v8
+# CHECK-ERROR: [[@LINE-1]]:13: error: the destination vector register group cannot overlap the source vector register group
+# CHECK-ERROR-LABEL: vwabdacc.vv v9, v9, v8
+
+vwabdaccu.vv v9, v9, v8
+# CHECK-ERROR: [[@LINE-1]]:14: error: the destination vector register group cannot overlap the source vector register group
+# CHECK-ERROR-LABEL: vwabdaccu.vv v9, v9, v8
diff --git a/llvm/test/MC/RISCV/rvv/zvabd.s b/llvm/test/MC/RISCV/rvv/zvabd.s
new file mode 100644
index 0000000000000..57d3577cdbf0c
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/zvabd.s
@@ -0,0 +1,63 @@
+# RUN: llvm-mc -triple=riscv32 -show-encoding --mattr=+v --mattr=+experimental-zvabd %s \
+# RUN:        | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: not llvm-mc -triple=riscv32 -show-encoding %s 2>&1 \
+# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv32 -filetype=obj --mattr=+v --mattr=+experimental-zvabd %s \
+# RUN:        | llvm-objdump -d --mattr=+v --mattr=+experimental-zvabd --no-print-imm-hex  - \
+# RUN:        | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv32 -filetype=obj --mattr=+v --mattr=+experimental-zvabd %s \
+# RUN:        | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+
+vabs.v v9, v8
+# CHECK-INST: vabs.v v9, v8
+# CHECK-ENCODING: [0xd7,0x24,0x88,0x4a]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 4a8824d7 <unknown>
+
+vabd.vv v10, v9, v8
+# CHECK-INST: vabd.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 46942557 <unknown>
+
+vabd.vv v10, v9, v8, v0.t
+# CHECK-INST: vabd.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 44942557 <unknown>
+
+vabdu.vv v10, v9, v8
+# CHECK-INST: vabdu.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 4e942557 <unknown>
+
+vabdu.vv v10, v9, v8, v0.t
+# CHECK-INST: vabdu.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 4c942557 <unknown>
+
+vwabdacc.vv v10, v9, v8
+# CHECK-INST: vwabdacc.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x56]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 56942557 <unknown>
+
+vwabdacc.vv v10, v9, v8, v0.t
+# CHECK-INST: vwabdacc.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x54]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 54942557 <unknown>
+
+vwabdaccu.vv v10, v9, v8
+# CHECK-INST: vwabdaccu.vv v10, v9, v8
+# CHECK-ENCODING: [0x57,0x25,0x94,0x5a]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 5a942557 <unknown>
+
+vwabdaccu.vv v10, v9, v8, v0.t
+# CHECK-INST: vwabdaccu.vv v10, v9, v8, v0.t
+# CHECK-ENCODING: [0x57,0x25,0x94,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvabd' (Vector Absolute Difference){{$}}
+# CHECK-UNKNOWN: 58942557 <unknown>
diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
index cb4fbcae2f4da..d31223ad13a0c 100644
--- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
@@ -1185,6 +1185,7 @@ Experimental extensions
     zicfilp              1.0       This is a long dummy description
     zicfiss              1.0
     zalasr               0.1
+    zvabd                0.2
     zvbc32e              0.7
     zvfbfa               0.1
     zvkgs                0.7

>From 452d5ec9831a3b85b85a8bc3456ed0c19ab6a94f Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Fri, 24 Jan 2025 15:55:53 +0800
Subject: [PATCH 2/4] [RISCV][CodeGen] Lowering abds/abdu to Zvabd instructions

When the Zvabd extension is enabled, we lower `ISD::ABDS`/`ISD::ABDU` directly
to the Zvabd instructions instead of expanding them.
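
As a rough sketch (mirroring the smaxmin_* cases in the updated tests), IR
using the smax/smin-subtract idiom is combined into `ISD::ABDS` and, with
`+experimental-zvabd`, now selects a single `vabd.vv`:

    declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
    declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)

    define <4 x i32> @abd_example(<4 x i32> %x, <4 x i32> %y) {
      ; max(x, y) - min(x, y) is recognized as ISD::ABDS; with Zvabd this
      ; becomes:  vsetivli zero, 4, e32, m1, ta, ma ; vabd.vv v8, v8, v9
      %max = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %x, <4 x i32> %y)
      %min = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %x, <4 x i32> %y)
      %sub = sub <4 x i32> %max, %min
      ret <4 x i32> %sub
    }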
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  10 +-
 .../Target/RISCV/RISCVInstrInfoVPseudos.td    |  13 +-
 .../Target/RISCV/RISCVInstrInfoVSDPatterns.td |  28 +-
 .../Target/RISCV/RISCVInstrInfoVVLPatterns.td |  31 +-
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td  |  24 ++
 llvm/test/CodeGen/RISCV/rvv/abd.ll            | 140 +++++++++
 .../CodeGen/RISCV/rvv/fixed-vectors-abd.ll    | 284 ++++++++++++++++++
 .../CodeGen/RISCV/rvv/fixed-vectors-sad.ll    |  83 +++++
 8 files changed, 586 insertions(+), 27 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9115c1385d6df..802b7634ad32a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -880,7 +880,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
                          Legal);
 
-      setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
+      if (Subtarget.hasStdExtZvabd())
+        setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Legal);
+      else
+        setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
 
       // Custom-lower extensions and truncations from/to mask types.
       setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
@@ -7039,6 +7042,8 @@ static unsigned getRISCVVLOp(SDValue Op) {
   OP_CASE(SMAX)
   OP_CASE(UMIN)
   OP_CASE(UMAX)
+  OP_CASE(ABDS)
+  OP_CASE(ABDU)
   OP_CASE(STRICT_FADD)
   OP_CASE(STRICT_FSUB)
   OP_CASE(STRICT_FMUL)
@@ -8271,6 +8276,9 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     return lowerToScalableOp(Op, DAG);
   case ISD::ABDS:
   case ISD::ABDU: {
+    if (Subtarget.hasStdExtZvabd())
+      return lowerToScalableOp(Op, DAG);
+
     SDLoc dl(Op);
     EVT VT = Op->getValueType(0);
     SDValue LHS = DAG.getFreeze(Op->getOperand(0));
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 03e6f43a38945..c2b2e0e5816ed 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2805,15 +2805,18 @@ multiclass VPseudoVFRDIV_VF_RM {
   }
 }
 
-multiclass VPseudoVALU_VV_VX {
- foreach m = MxList in {
-    defm "" : VPseudoBinaryV_VV<m>,
+multiclass VPseudoVALU_VV<bit Commutable = 0> {
+  foreach m = MxList in
+  defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
             SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", m.MX,
                         forcePassthruRead=true>;
-    defm "" : VPseudoBinaryV_VX<m>,
+}
+
+multiclass VPseudoVALU_VV_VX<bit Commutable = 0> : VPseudoVALU_VV<Commutable> {
+  foreach m = MxList in
+  defm "" : VPseudoBinaryV_VX<m>,
             SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", m.MX,
                         forcePassthruRead=true>;
-  }
 }
 
 multiclass VPseudoVSGNJ_VV_VF {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index cc2977c329de1..70a67a3234608 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -123,19 +123,27 @@ class VPatBinarySDNode_XI<SDPatternOperator vop,
                      xop_kind:$rs2,
                      avl, log2sew, TA_MA)>;
 
+multiclass VPatBinarySDNode_VV<SDPatternOperator vop, string instruction_name,
+                               list<VTypeInfo> vtilist = AllIntegerVectors,
+                               bit isSEWAware = 0> {
+  foreach vti = vtilist in {
+    let Predicates = GetVTypePredicates<vti>.Predicates in
+    def : VPatBinarySDNode_VV<vop, instruction_name,
+                              vti.Vector, vti.Vector, vti.Log2SEW,
+                              vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
+  }
+}
+
 multiclass VPatBinarySDNode_VV_VX<SDPatternOperator vop, string instruction_name,
                                   list<VTypeInfo> vtilist = AllIntegerVectors,
-                                  bit isSEWAware = 0> {
+                                  bit isSEWAware = 0>
+    : VPatBinarySDNode_VV<vop, instruction_name, vtilist, isSEWAware> {
   foreach vti = vtilist in {
-    let Predicates = GetVTypePredicates<vti>.Predicates in {
-      def : VPatBinarySDNode_VV<vop, instruction_name,
-                                vti.Vector, vti.Vector, vti.Log2SEW,
-                                vti.LMul, vti.AVL, vti.RegClass, isSEWAware>;
-      def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
-                                vti.Vector, vti.Vector, vti.Log2SEW,
-                                vti.LMul, vti.AVL, vti.RegClass,
-                                SplatPat, GPR, isSEWAware>;
-    }
+    let Predicates = GetVTypePredicates<vti>.Predicates in
+    def : VPatBinarySDNode_XI<vop, instruction_name, "VX",
+                              vti.Vector, vti.Vector, vti.Log2SEW,
+                              vti.LMul, vti.AVL, vti.RegClass,
+                              SplatPat, GPR, isSEWAware>;
   }
 }
 
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index acbccddce2b52..9f4c6abbf4e24 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -939,23 +939,32 @@ class VPatBinaryVL_XI<SDPatternOperator vop,
                    xop_kind:$rs2,
                    (mask_type VMV0:$vm), GPR:$vl, log2sew, TAIL_AGNOSTIC)>;
 
-multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name,
-                              list<VTypeInfo> vtilist = AllIntegerVectors,
-                              bit isSEWAware = 0> {
+multiclass VPatBinaryVL_VV<SDPatternOperator vop, string instruction_name,
+                           list<VTypeInfo> vtilist = AllIntegerVectors,
+                           bit isSEWAware = 0> {
   foreach vti = vtilist in {
     let Predicates = GetVTypePredicates<vti>.Predicates in {
-      def : VPatBinaryVL_V<vop, instruction_name, "VV",
-                           vti.Vector, vti.Vector, vti.Vector, vti.Mask,
-                           vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
-                           vti.RegClass, isSEWAware>;
-      def : VPatBinaryVL_XI<vop, instruction_name, "VX",
-                            vti.Vector, vti.Vector, vti.Vector, vti.Mask,
-                            vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
-                            SplatPat, GPR, isSEWAware>;
+    def : VPatBinaryVL_V<vop, instruction_name, "VV",
+                         vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                         vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                         vti.RegClass, isSEWAware>;
     }
   }
 }
 
+multiclass VPatBinaryVL_VV_VX<SDPatternOperator vop, string instruction_name,
+                              list<VTypeInfo> vtilist = AllIntegerVectors,
+                              bit isSEWAware = 0> 
+    : VPatBinaryVL_VV<vop, instruction_name, vtilist, isSEWAware>{
+  foreach vti = vtilist in {
+    let Predicates = GetVTypePredicates<vti>.Predicates in
+    def : VPatBinaryVL_XI<vop, instruction_name, "VX",
+                          vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                          vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                          SplatPat, GPR, isSEWAware>;
+  }
+}
+
 multiclass VPatBinaryVL_VV_VX_VI<SDPatternOperator vop, string instruction_name,
                                  Operand ImmType = simm5>
     : VPatBinaryVL_VV_VX<vop, instruction_name> {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 16d8a45b0cd1e..a5243e847d728 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -25,3 +25,27 @@ let Predicates = [HasStdExtZvabd] in {
     def VWABDACCU_VV : VALUVV<0b010110, OPMVV, "vwabdaccu.vv">;
   } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV
 } // Predicates = [HasStdExtZvabd]
+
+//===----------------------------------------------------------------------===//
+// Pseudos
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtZvabd] in {
+  defm PseudoVABD : VPseudoVALU_VV<Commutable = 1>;
+  defm PseudoVABDU : VPseudoVALU_VV<Commutable = 1>;
+} // Predicates = [HasStdExtZvabd]
+
+//===----------------------------------------------------------------------===//
+// CodeGen Patterns
+//===----------------------------------------------------------------------===//
+let HasPassthruOp = true, HasMaskOp = true in {
+def riscv_abds_vl : RVSDNode<"ABDS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def riscv_abdu_vl : RVSDNode<"ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+} // let HasPassthruOp = true, HasMaskOp = true
+
+let Predicates = [HasStdExtZvabd] in {
+defm : VPatBinarySDNode_VV<abds, "PseudoVABD">;
+defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU">;
+
+defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD">;
+defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU">;
+} // Predicates = [HasStdExtZvabd]
diff --git a/llvm/test/CodeGen/RISCV/rvv/abd.ll b/llvm/test/CodeGen/RISCV/rvv/abd.ll
index be4292c9902eb..d5c94e274b4a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abd.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV64
 
 ;
 ; SABD
@@ -14,6 +16,12 @@ define <vscale x 16 x i8> @sabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 16 x i8> %a to <vscale x 16 x i16>
   %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
   %sub = sub <vscale x 16 x i16> %a.sext, %b.sext
@@ -30,6 +38,14 @@ define <vscale x 16 x i8> @sabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_b_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT:    vmxor.mm v0, v0, v8
+; ZVABD-NEXT:    vmv.v.i v8, 0
+; ZVABD-NEXT:    vmerge.vim v8, v8, 1, v0
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 16 x i1> %a to <vscale x 16 x i8>
   %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
   %sub = sub <vscale x 16 x i8> %a.sext, %b.sext
@@ -45,6 +61,12 @@ define <vscale x 8 x i16> @sabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 8 x i16> %a to <vscale x 8 x i32>
   %b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
   %sub = sub <vscale x 8 x i32> %a.sext, %b.sext
@@ -63,6 +85,14 @@ define <vscale x 8 x i16> @sabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v10, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>
   %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
   %sub = sub <vscale x 8 x i16> %a.sext, %b.sext
@@ -78,6 +108,12 @@ define <vscale x 4 x i32> @sabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
   %sub = sub <vscale x 4 x i64> %a.sext, %b.sext
@@ -96,6 +132,14 @@ define <vscale x 4 x i32> @sabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v10, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 4 x i16> %a to <vscale x 4 x i32>
   %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
   %sub = sub <vscale x 4 x i32> %a.sext, %b.sext
@@ -111,6 +155,12 @@ define <vscale x 2 x i64> @sabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_d:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 2 x i64> %a to <vscale x 2 x i128>
   %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
   %sub = sub <vscale x 2 x i128> %a.sext, %b.sext
@@ -129,6 +179,14 @@ define <vscale x 2 x i64> @sabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_d_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v10, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.sext = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
   %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
   %sub = sub <vscale x 2 x i64> %a.sext, %b.sext
@@ -148,6 +206,12 @@ define <vscale x 16 x i8> @uabd_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 16 x i8> %a to <vscale x 16 x i16>
   %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
   %sub = sub <vscale x 16 x i16> %a.zext, %b.zext
@@ -164,6 +228,14 @@ define <vscale x 16 x i8> @uabd_b_promoted_ops(<vscale x 16 x i1> %a, <vscale x
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_b_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; ZVABD-NEXT:    vmxor.mm v0, v0, v8
+; ZVABD-NEXT:    vmv.v.i v8, 0
+; ZVABD-NEXT:    vmerge.vim v8, v8, 1, v0
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 16 x i1> %a to <vscale x 16 x i8>
   %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
   %sub = sub <vscale x 16 x i8> %a.zext, %b.zext
@@ -179,6 +251,12 @@ define <vscale x 8 x i16> @uabd_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 8 x i16> %a to <vscale x 8 x i32>
   %b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
   %sub = sub <vscale x 8 x i32> %a.zext, %b.zext
@@ -197,6 +275,14 @@ define <vscale x 8 x i16> @uabd_h_promoted_ops(<vscale x 8 x i8> %a, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v10, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
   %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
   %sub = sub <vscale x 8 x i16> %a.zext, %b.zext
@@ -212,6 +298,12 @@ define <vscale x 4 x i32> @uabd_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
   %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
@@ -230,6 +322,14 @@ define <vscale x 4 x i32> @uabd_s_promoted_ops(<vscale x 4 x i16> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v10, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
   %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
   %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
@@ -245,6 +345,12 @@ define <vscale x 2 x i64> @uabd_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v10
 ; CHECK-NEXT:    vsub.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_d:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 2 x i64> %a to <vscale x 2 x i128>
   %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
   %sub = sub <vscale x 2 x i128> %a.zext, %b.zext
@@ -263,6 +369,14 @@ define <vscale x 2 x i64> @uabd_d_promoted_ops(<vscale x 2 x i32> %a, <vscale x
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_d_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v10, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
   %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
   %sub = sub <vscale x 2 x i64> %a.zext, %b.zext
@@ -281,6 +395,13 @@ define <vscale x 4 x i32> @uabd_non_matching_extension(<vscale x 4 x i32> %a, <v
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v12
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_non_matching_extension:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v12, v10
+; ZVABD-NEXT:    vabdu.vv v8, v8, v12
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i32> %a to <vscale x 4 x i64>
   %b.zext = zext <vscale x 4 x i8> %b to <vscale x 4 x i64>
   %sub = sub <vscale x 4 x i64> %a.zext, %b.zext
@@ -302,6 +423,15 @@ define <vscale x 4 x i32> @uabd_non_matching_promoted_ops(<vscale x 4 x i8> %a,
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_non_matching_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v10, v8
+; ZVABD-NEXT:    vabdu.vv v10, v10, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
   %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
   %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
@@ -321,6 +451,14 @@ define <vscale x 4 x i32> @uabd_non_matching_promotion(<vscale x 4 x i8> %a, <vs
 ; CHECK-NEXT:    vmax.vv v10, v10, v12
 ; CHECK-NEXT:    vsub.vv v8, v10, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_non_matching_promotion:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v10, v8
+; ZVABD-NEXT:    vsext.vf4 v12, v9
+; ZVABD-NEXT:    vabd.vv v8, v10, v12
+; ZVABD-NEXT:    ret
   %a.zext = zext <vscale x 4 x i8> %a to <vscale x 4 x i32>
   %b.zext = sext <vscale x 4 x i8> %b to <vscale x 4 x i32>
   %sub = sub <vscale x 4 x i32> %a.zext, %b.zext
@@ -343,3 +481,5 @@ declare <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128>, i1)
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; RV32: {{.*}}
 ; RV64: {{.*}}
+; ZVABD-RV32: {{.*}}
+; ZVABD-RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
index bd1209a17b534..f61c17e18b022 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+experimental-zvabd -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD,ZVABD-RV64
 ;
 ; SABD
 ;
@@ -14,6 +16,12 @@ define <8 x i8> @sabd_8b_as_16b(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_8b_as_16b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <8 x i8> %a to <8 x i16>
   %b.sext = sext <8 x i8> %b to <8 x i16>
   %sub = sub <8 x i16> %a.sext, %b.sext
@@ -31,6 +39,12 @@ define <8 x i8> @sabd_8b_as_32b(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_8b_as_32b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <8 x i8> %a to <8 x i32>
   %b.sext = sext <8 x i8> %b to <8 x i32>
   %sub = sub <8 x i32> %a.sext, %b.sext
@@ -48,6 +62,12 @@ define <16 x i8> @sabd_16b(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_16b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <16 x i8> %a to <16 x i16>
   %b.sext = sext <16 x i8> %b to <16 x i16>
   %sub = sub <16 x i16> %a.sext, %b.sext
@@ -65,6 +85,12 @@ define <4 x i16> @sabd_4h(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_4h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <4 x i16> %a to <4 x i32>
   %b.sext = sext <4 x i16> %b to <4 x i32>
   %sub = sub <4 x i32> %a.sext, %b.sext
@@ -84,6 +110,14 @@ define <4 x i16> @sabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_4h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabd.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <4 x i8> %a to <4 x i16>
   %b.sext = sext <4 x i8> %b to <4 x i16>
   %sub = sub <4 x i16> %a.sext, %b.sext
@@ -100,6 +134,12 @@ define <8 x i16> @sabd_8h(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_8h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <8 x i16> %a to <8 x i32>
   %b.sext = sext <8 x i16> %b to <8 x i32>
   %sub = sub <8 x i32> %a.sext, %b.sext
@@ -119,6 +159,14 @@ define <8 x i16> @sabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_8h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <8 x i8> %a to <8 x i16>
   %b.sext = sext <8 x i8> %b to <8 x i16>
   %sub = sub <8 x i16> %a.sext, %b.sext
@@ -135,6 +183,12 @@ define <2 x i32> @sabd_2s(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_2s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <2 x i32> %a to <2 x i64>
   %b.sext = sext <2 x i32> %b to <2 x i64>
   %sub = sub <2 x i64> %a.sext, %b.sext
@@ -154,6 +208,14 @@ define <2 x i32> @sabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_2s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabd.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <2 x i16> %a to <2 x i32>
   %b.sext = sext <2 x i16> %b to <2 x i32>
   %sub = sub <2 x i32> %a.sext, %b.sext
@@ -170,6 +232,12 @@ define <4 x i32> @sabd_4s(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_4s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <4 x i32> %a to <4 x i64>
   %b.sext = sext <4 x i32> %b to <4 x i64>
   %sub = sub <4 x i64> %a.sext, %b.sext
@@ -189,6 +257,14 @@ define <4 x i32> @sabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_4s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <4 x i16> %a to <4 x i32>
   %b.sext = sext <4 x i16> %b to <4 x i32>
   %sub = sub <4 x i32> %a.sext, %b.sext
@@ -204,6 +280,12 @@ define <2 x i64> @sabd_2d(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_2d:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <2 x i64> %a to <2 x i128>
   %b.sext = sext <2 x i64> %b to <2 x i128>
   %sub = sub <2 x i128> %a.sext, %b.sext
@@ -223,6 +305,14 @@ define <2 x i64> @sabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_2d_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabd.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.sext = sext <2 x i32> %a to <2 x i64>
   %b.sext = sext <2 x i32> %b to <2 x i64>
   %sub = sub <2 x i64> %a.sext, %b.sext
@@ -243,6 +333,12 @@ define <8 x i8> @uabd_8b(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_8b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <8 x i8> %a to <8 x i16>
   %b.zext = zext <8 x i8> %b to <8 x i16>
   %sub = sub <8 x i16> %a.zext, %b.zext
@@ -260,6 +356,12 @@ define <16 x i8> @uabd_16b(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_16b:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <16 x i8> %a to <16 x i16>
   %b.zext = zext <16 x i8> %b to <16 x i16>
   %sub = sub <16 x i16> %a.zext, %b.zext
@@ -277,6 +379,12 @@ define <4 x i16> @uabd_4h(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_4h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <4 x i16> %a to <4 x i32>
   %b.zext = zext <4 x i16> %b to <4 x i32>
   %sub = sub <4 x i32> %a.zext, %b.zext
@@ -296,6 +404,14 @@ define <4 x i16> @uabd_4h_promoted_ops(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_4h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabdu.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <4 x i8> %a to <4 x i16>
   %b.zext = zext <4 x i8> %b to <4 x i16>
   %sub = sub <4 x i16> %a.zext, %b.zext
@@ -312,6 +428,12 @@ define <8 x i16> @uabd_8h(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_8h:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <8 x i16> %a to <8 x i32>
   %b.zext = zext <8 x i16> %b to <8 x i32>
   %sub = sub <8 x i32> %a.zext, %b.zext
@@ -331,6 +453,14 @@ define <8 x i16> @uabd_8h_promoted_ops(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_8h_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <8 x i8> %a to <8 x i16>
   %b.zext = zext <8 x i8> %b to <8 x i16>
   %sub = sub <8 x i16> %a.zext, %b.zext
@@ -347,6 +477,12 @@ define <2 x i32> @uabd_2s(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_2s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <2 x i32> %a to <2 x i64>
   %b.zext = zext <2 x i32> %b to <2 x i64>
   %sub = sub <2 x i64> %a.zext, %b.zext
@@ -366,6 +502,14 @@ define <2 x i32> @uabd_2s_promoted_ops(<2 x i16> %a, <2 x i16> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_2s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabdu.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <2 x i16> %a to <2 x i32>
   %b.zext = zext <2 x i16> %b to <2 x i32>
   %sub = sub <2 x i32> %a.zext, %b.zext
@@ -382,6 +526,12 @@ define <4 x i32> @uabd_4s(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_4s:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <4 x i32> %a to <4 x i64>
   %b.zext = zext <4 x i32> %b to <4 x i64>
   %sub = sub <4 x i64> %a.zext, %b.zext
@@ -401,6 +551,14 @@ define <4 x i32> @uabd_4s_promoted_ops(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_4s_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <4 x i16> %a to <4 x i32>
   %b.zext = zext <4 x i16> %b to <4 x i32>
   %sub = sub <4 x i32> %a.zext, %b.zext
@@ -416,6 +574,12 @@ define <2 x i64> @uabd_2d(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_2d:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <2 x i64> %a to <2 x i128>
   %b.zext = zext <2 x i64> %b to <2 x i128>
   %sub = sub <2 x i128> %a.zext, %b.zext
@@ -435,6 +599,14 @@ define <2 x i64> @uabd_2d_promoted_ops(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_2d_promoted_ops:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabdu.vv v9, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v9
+; ZVABD-NEXT:    ret
   %a.zext = zext <2 x i32> %a to <2 x i64>
   %b.zext = zext <2 x i32> %b to <2 x i64>
   %sub = sub <2 x i64> %a.zext, %b.zext
@@ -451,6 +623,14 @@ define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_v16i8_nuw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vsub.vv v8, v8, v9
+; ZVABD-NEXT:    vrsub.vi v9, v8, 0
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nuw <16 x i8> %a, %b
   %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
   ret <16 x i8> %abs
@@ -465,6 +645,14 @@ define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_v8i16_nuw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vsub.vv v8, v8, v9
+; ZVABD-NEXT:    vrsub.vi v9, v8, 0
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nuw <8 x i16> %a, %b
   %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
   ret <8 x i16> %abs
@@ -479,6 +667,14 @@ define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_v4i32_nuw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vsub.vv v8, v8, v9
+; ZVABD-NEXT:    vrsub.vi v9, v8, 0
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nuw <4 x i32> %a, %b
   %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
   ret <4 x i32> %abs
@@ -493,6 +689,14 @@ define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: uabd_v2i64_nuw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vsub.vv v8, v8, v9
+; ZVABD-NEXT:    vrsub.vi v9, v8, 0
+; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nuw <2 x i64> %a, %b
   %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
   ret <2 x i64> %abs
@@ -507,6 +711,12 @@ define <16 x i8> @sabd_v16i8_nsw(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_v16i8_nsw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nsw <16 x i8> %a, %b
   %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
   ret <16 x i8> %abs
@@ -521,6 +731,12 @@ define <8 x i16> @sabd_v8i16_nsw(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_v8i16_nsw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nsw <8 x i16> %a, %b
   %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
   ret <8 x i16> %abs
@@ -535,6 +751,12 @@ define <4 x i32> @sabd_v4i32_nsw(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_v4i32_nsw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nsw <4 x i32> %a, %b
   %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
   ret <4 x i32> %abs
@@ -549,6 +771,12 @@ define <2 x i64> @sabd_v2i64_nsw(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sabd_v2i64_nsw:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %sub = sub nsw <2 x i64> %a, %b
   %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
   ret <2 x i64> %abs
@@ -563,6 +791,12 @@ define <16 x i8> @smaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: smaxmin_v16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <16 x i8> @llvm.smax.v16i8(<16 x i8> %0, <16 x i8> %1)
   %b = tail call <16 x i8> @llvm.smin.v16i8(<16 x i8> %0, <16 x i8> %1)
   %sub = sub <16 x i8> %a, %b
@@ -578,6 +812,12 @@ define <8 x i16> @smaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: smaxmin_v8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <8 x i16> @llvm.smax.v8i16(<8 x i16> %0, <8 x i16> %1)
   %b = tail call <8 x i16> @llvm.smin.v8i16(<8 x i16> %0, <8 x i16> %1)
   %sub = sub <8 x i16> %a, %b
@@ -593,6 +833,12 @@ define <4 x i32> @smaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: smaxmin_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> %0, <4 x i32> %1)
   %b = tail call <4 x i32> @llvm.smin.v4i32(<4 x i32> %0, <4 x i32> %1)
   %sub = sub <4 x i32> %a, %b
@@ -608,6 +854,12 @@ define <2 x i64> @smaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: smaxmin_v2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vabd.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <2 x i64> @llvm.smax.v2i64(<2 x i64> %0, <2 x i64> %1)
   %b = tail call <2 x i64> @llvm.smin.v2i64(<2 x i64> %0, <2 x i64> %1)
   %sub = sub <2 x i64> %a, %b
@@ -623,6 +875,12 @@ define <16 x i8> @umaxmin_v16i8(<16 x i8> %0, <16 x i8> %1) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: umaxmin_v16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
   %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %0, <16 x i8> %1)
   %sub = sub <16 x i8> %a, %b
@@ -638,6 +896,12 @@ define <8 x i16> @umaxmin_v8i16(<8 x i16> %0, <8 x i16> %1) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: umaxmin_v8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <8 x i16> @llvm.umax.v8i16(<8 x i16> %0, <8 x i16> %1)
   %b = tail call <8 x i16> @llvm.umin.v8i16(<8 x i16> %0, <8 x i16> %1)
   %sub = sub <8 x i16> %a, %b
@@ -653,6 +917,12 @@ define <4 x i32> @umaxmin_v4i32(<4 x i32> %0, <4 x i32> %1) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: umaxmin_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <4 x i32> @llvm.umax.v4i32(<4 x i32> %0, <4 x i32> %1)
   %b = tail call <4 x i32> @llvm.umin.v4i32(<4 x i32> %0, <4 x i32> %1)
   %sub = sub <4 x i32> %a, %b
@@ -668,6 +938,12 @@ define <2 x i64> @umaxmin_v2i64(<2 x i64> %0, <2 x i64> %1) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: umaxmin_v2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <2 x i64> @llvm.umax.v2i64(<2 x i64> %0, <2 x i64> %1)
   %b = tail call <2 x i64> @llvm.umin.v2i64(<2 x i64> %0, <2 x i64> %1)
   %sub = sub <2 x i64> %a, %b
@@ -683,6 +959,12 @@ define <16 x i8> @umaxmin_v16i8_com1(<16 x i8> %0, <16 x i8> %1) {
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9
 ; CHECK-NEXT:    vsub.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: umaxmin_v16i8_com1:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    ret
   %a = tail call <16 x i8> @llvm.umax.v16i8(<16 x i8> %0, <16 x i8> %1)
   %b = tail call <16 x i8> @llvm.umin.v16i8(<16 x i8> %1, <16 x i8> %0)
   %sub = sub <16 x i8> %a, %b
@@ -725,3 +1007,5 @@ declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; RV32: {{.*}}
 ; RV64: {{.*}}
+; ZVABD-RV32: {{.*}}
+; ZVABD-RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
index c0a213034c95b..a3d6ebd2042c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc < %s -mtriple=riscv32 -mattr=+v | FileCheck %s
 ; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+experimental-zvabd | FileCheck %s --check-prefix=ZVABD
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+experimental-zvabd | FileCheck %s --check-prefix=ZVABD
 
 define signext i16 @sad_4x8_as_i16(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-LABEL: sad_4x8_as_i16:
@@ -16,6 +18,18 @@ define signext i16 @sad_4x8_as_i16(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sad_4x8_as_i16:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vmv.s.x v9, zero
+; ZVABD-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
+; ZVABD-NEXT:    vwredsumu.vs v8, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
 entry:
   %1 = zext <4 x i8> %a to <4 x i16>
   %3 = zext <4 x i8> %b to <4 x i16>
@@ -38,6 +52,17 @@ define signext i32 @sad_4x8_as_i32(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-NEXT:    vredsum.vs v8, v9, v8
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sad_4x8_as_i32:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v9, v8
+; ZVABD-NEXT:    vmv.s.x v8, zero
+; ZVABD-NEXT:    vredsum.vs v8, v9, v8
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
 entry:
   %1 = zext <4 x i8> %a to <4 x i32>
   %3 = zext <4 x i8> %b to <4 x i32>
@@ -61,6 +86,18 @@ define signext i16 @sad_16x8_as_i16(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sad_16x8_as_i16:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vmv.s.x v9, zero
+; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vwredsumu.vs v8, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
 entry:
   %1 = zext <16 x i8> %a to <16 x i16>
   %3 = zext <16 x i8> %b to <16 x i16>
@@ -83,6 +120,17 @@ define signext i32 @sad_16x8_as_i32(<16 x i8> %a, <16 x i8> %b) {
 ; CHECK-NEXT:    vredsum.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sad_16x8_as_i32:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v12, v8, v9
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v8, v12
+; ZVABD-NEXT:    vmv.s.x v12, zero
+; ZVABD-NEXT:    vredsum.vs v8, v8, v12
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
 entry:
   %1 = zext <16 x i8> %a to <16 x i32>
   %3 = zext <16 x i8> %b to <16 x i32>
@@ -135,6 +183,41 @@ define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea
 ; CHECK-NEXT:    vredsum.vs v8, v8, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: sad_2block_16xi8_as_i32:
+; ZVABD:       # %bb.0: # %entry
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vle8.v v9, (a1)
+; ZVABD-NEXT:    add a0, a0, a2
+; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vle8.v v10, (a0)
+; ZVABD-NEXT:    vle8.v v11, (a1)
+; ZVABD-NEXT:    add a0, a0, a2
+; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vle8.v v14, (a0)
+; ZVABD-NEXT:    vle8.v v15, (a1)
+; ZVABD-NEXT:    add a0, a0, a2
+; ZVABD-NEXT:    add a1, a1, a3
+; ZVABD-NEXT:    vabdu.vv v8, v8, v9
+; ZVABD-NEXT:    vle8.v v9, (a0)
+; ZVABD-NEXT:    vabdu.vv v10, v10, v11
+; ZVABD-NEXT:    vle8.v v11, (a1)
+; ZVABD-NEXT:    vwaddu.vv v12, v10, v8
+; ZVABD-NEXT:    vabdu.vv v8, v14, v15
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v14, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vabdu.vv v16, v9, v11
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vwaddu.vv v8, v14, v12
+; ZVABD-NEXT:    vzext.vf2 v12, v16
+; ZVABD-NEXT:    vwaddu.wv v8, v8, v12
+; ZVABD-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVABD-NEXT:    vmv.s.x v12, zero
+; ZVABD-NEXT:    vredsum.vs v8, v8, v12
+; ZVABD-NEXT:    vmv.x.s a0, v8
+; ZVABD-NEXT:    ret
 entry:
   %idx.ext8 = sext i32 %strideb to i64
   %idx.ext = sext i32 %stridea to i64

>From bd5de65da7c9d6e3f8f7084674cc5b3f684534ad Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Wed, 5 Feb 2025 19:17:16 +0800
Subject: [PATCH 3/4] [RISCV][CodeGen] Lower abs to Zvabd instructions

For the abs operation, we can synthesize it via vabd.vx with the x0 register.
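
As an illustrative sketch only (it simply mirrors the updated abs-sdnode.ll
checks further down, and is not additional patch content), the same @llvm.abs
call collapses to a single instruction once Zvabd is enabled:

  ; llc -mtriple=riscv64 -mattr=+v[,+experimental-zvabd] on this IR:
  define <vscale x 4 x i32> @vabs_example(<vscale x 4 x i32> %v) {
    %r = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %v, i1 false)
    ret <vscale x 4 x i32> %r
  }
  ; +v:          vsetvli a0, zero, e32, m2, ta, ma ; vrsub.vi v10, v8, 0 ; vmax.vv v8, v8, v10
  ; +v,+zvabd:   vsetvli a0, zero, e32, m2, ta, ma ; vabs.v v8, v8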
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |  27 +-
 .../Target/RISCV/RISCVInstrInfoVVLPatterns.td |  20 ++
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td  |  26 ++
 llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td    |  20 --
 llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll     |  94 ++++++
 llvm/test/CodeGen/RISCV/rvv/abs-vp.ll         | 319 ++++++++++++++++++
 .../CodeGen/RISCV/rvv/fixed-vectors-abd.ll    |  12 +-
 .../CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll | 247 ++++++++++++++
 .../CodeGen/RISCV/rvv/fixed-vectors-abs.ll    | 107 ++++++
 9 files changed, 833 insertions(+), 39 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 802b7634ad32a..048fade110cd4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -881,7 +881,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          Legal);
 
       if (Subtarget.hasStdExtZvabd())
-        setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Legal);
+        setOperationAction({ISD::ABDS, ISD::ABDU, ISD::ABS}, VT, Legal);
       else
         setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Custom);
 
@@ -13083,17 +13083,22 @@ SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
   } else
     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
 
-  SDValue SplatZero = DAG.getNode(
-      RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
-      DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
-  SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
-                             DAG.getUNDEF(ContainerVT), Mask, VL);
-  SDValue Max = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
-                            DAG.getUNDEF(ContainerVT), Mask, VL);
-
+  SDValue Result;
+  if (Subtarget.hasStdExtZvabd()) {
+    Result = DAG.getNode(RISCVISD::ABS_VL, DL, ContainerVT, X,
+                         DAG.getUNDEF(ContainerVT), Mask, VL);
+  } else {
+    SDValue SplatZero = DAG.getNode(
+        RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+        DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
+    SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
+                               DAG.getUNDEF(ContainerVT), Mask, VL);
+    Result = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
+                         DAG.getUNDEF(ContainerVT), Mask, VL);
+  }
   if (VT.isFixedLengthVector())
-    Max = convertFromScalableVector(VT, Max, DAG, Subtarget);
-  return Max;
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+  return Result;
 }
 
 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 9f4c6abbf4e24..fddbb5ef1f45d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2091,6 +2091,26 @@ multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
   }
 }
 
+multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
+                         Predicate predicate = HasStdExtZvbb> {
+  foreach vti = AllIntegerVectors in {
+    let Predicates = !listconcat([predicate],
+                                 GetVTypePredicates<vti>.Predicates) in {
+      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
+                                (vti.Vector vti.RegClass:$passthru),
+                                (vti.Mask VMV0:$vm),
+                                VLOpFrag)),
+                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
+                   vti.RegClass:$passthru,
+                   vti.RegClass:$rs1,
+                   (vti.Mask VMV0:$vm),
+                   GPR:$vl,
+                   vti.Log2SEW,
+                   TAIL_AGNOSTIC)>;
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index a5243e847d728..512bb78f933f0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -29,7 +29,23 @@ let Predicates = [HasStdExtZvabd] in {
 //===----------------------------------------------------------------------===//
 // Pseudos
 //===----------------------------------------------------------------------===//
+
+multiclass PseudoVABS {
+  foreach m = MxList in {
+    defvar mx = m.MX;
+    let VLMul = m.value in {
+      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
+                        SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+      def "_V_" # mx # "_MASK" :
+        VPseudoUnaryMask<m.vrclass, m.vrclass>,
+        RISCVMaskedPseudo<MaskIdx=2>,
+        SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+    }
+  }
+}
+
 let Predicates = [HasStdExtZvabd] in {
+  defm PseudoVABS : PseudoVABS;
   defm PseudoVABD : VPseudoVALU_VV<Commutable = 1>;
   defm PseudoVABDU : VPseudoVALU_VV<Commutable = 1>;
 } // Predicates = [HasStdExtZvabd]
@@ -38,6 +54,7 @@ let Predicates = [HasStdExtZvabd] in {
 // CodeGen Patterns
 //===----------------------------------------------------------------------===//
 let HasPassthruOp = true, HasMaskOp = true in {
+def riscv_abs_vl  : RVSDNode<"ABS_VL", SDT_RISCVIntUnOp_VL>;
 def riscv_abds_vl : RVSDNode<"ABDS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 def riscv_abdu_vl : RVSDNode<"ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 } // let HasPassthruOp = true, HasMaskOp = true
@@ -48,4 +65,13 @@ defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU">;
 
 defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD">;
 defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU">;
+
+foreach vti = AllIntegerVectors in {
+  def : Pat<(vti.Vector (abs (vti.Vector vti.RegClass:$rs2))),
+            (!cast<Instruction>("PseudoVABS_V_"#vti.LMul.MX)
+                    (vti.Vector (IMPLICIT_DEF)),
+                    vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+}
+
+defm : VPatUnaryVL_V<riscv_abs_vl, "PseudoVABS", HasStdExtZvabd>;
 } // Predicates = [HasStdExtZvabd]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 5a5a9edebd925..c98b554c879f7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -708,26 +708,6 @@ foreach vtiToWti = AllWidenableIntVectors in {
 // VL patterns
 //===----------------------------------------------------------------------===//
 
-multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
-                         Predicate predicate = HasStdExtZvbb> {
-  foreach vti = AllIntegerVectors in {
-    let Predicates = !listconcat([predicate],
-                                 GetVTypePredicates<vti>.Predicates) in {
-      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
-                                (vti.Vector vti.RegClass:$passthru),
-                                (vti.Mask VMV0:$vm),
-                                VLOpFrag)),
-                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
-                   vti.RegClass:$passthru,
-                   vti.RegClass:$rs1,
-                   (vti.Mask VMV0:$vm),
-                   GPR:$vl,
-                   vti.Log2SEW,
-                   TAIL_AGNOSTIC)>;
-    }
-  }
-}
-
 foreach vti = AllIntegerVectors in {
   let Predicates = !listconcat([HasStdExtZvkb],
                                GetVTypePredicates<vti>.Predicates) in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
index 589b9994651d2..c02d07668ff60 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
@@ -1,6 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
 
 declare <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16>, i1)
 
@@ -11,6 +15,12 @@ define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16> %v, i1 false)
   ret <vscale x 1 x i16> %r
 }
@@ -24,6 +34,12 @@ define <vscale x 2 x i16> @vabs_nxv2i16(<vscale x 2 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16> %v, i1 false)
   ret <vscale x 2 x i16> %r
 }
@@ -37,6 +53,12 @@ define <vscale x 4 x i16> @vabs_nxv4i16(<vscale x 4 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %v, i1 false)
   ret <vscale x 4 x i16> %r
 }
@@ -50,6 +72,12 @@ define <vscale x 8 x i16> @vabs_nxv8i16(<vscale x 8 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %v, i1 false)
   ret <vscale x 8 x i16> %r
 }
@@ -63,6 +91,12 @@ define <vscale x 16 x i16> @vabs_nxv16i16(<vscale x 16 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %v, i1 false)
   ret <vscale x 16 x i16> %r
 }
@@ -76,6 +110,12 @@ define <vscale x 32 x i16> @vabs_nxv32i16(<vscale x 32 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv32i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16> %v, i1 false)
   ret <vscale x 32 x i16> %r
 }
@@ -89,6 +129,12 @@ define <vscale x 1 x i32> @vabs_nxv1i32(<vscale x 1 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i32> @llvm.abs.nxv1i32(<vscale x 1 x i32> %v, i1 false)
   ret <vscale x 1 x i32> %r
 }
@@ -102,6 +148,12 @@ define <vscale x 2 x i32> @vabs_nxv2i32(<vscale x 2 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> %v, i1 false)
   ret <vscale x 2 x i32> %r
 }
@@ -115,6 +167,12 @@ define <vscale x 4 x i32> @vabs_nxv4i32(<vscale x 4 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %v, i1 false)
   ret <vscale x 4 x i32> %r
 }
@@ -128,6 +186,12 @@ define <vscale x 8 x i32> @vabs_nxv8i32(<vscale x 8 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %v, i1 false)
   ret <vscale x 8 x i32> %r
 }
@@ -141,6 +205,12 @@ define <vscale x 16 x i32> @vabs_nxv16i32(<vscale x 16 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv16i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32> %v, i1 false)
   ret <vscale x 16 x i32> %r
 }
@@ -154,6 +224,12 @@ define <vscale x 1 x i64> @vabs_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i64> @llvm.abs.nxv1i64(<vscale x 1 x i64> %v, i1 false)
   ret <vscale x 1 x i64> %r
 }
@@ -167,6 +243,12 @@ define <vscale x 2 x i64> @vabs_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %v, i1 false)
   ret <vscale x 2 x i64> %r
 }
@@ -180,6 +262,12 @@ define <vscale x 4 x i64> @vabs_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %v, i1 false)
   ret <vscale x 4 x i64> %r
 }
@@ -193,6 +281,12 @@ define <vscale x 8 x i64> @vabs_nxv8i64(<vscale x 8 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> %v, i1 false)
   ret <vscale x 8 x i64> %r
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index 2bee8de168d7d..f7e24ff455401 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -3,6 +3,10 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+experimental-zvabd -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+experimental-zvabd -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
 
 declare <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8>, i1 immarg, <vscale x 1 x i1>, i32)
 
@@ -13,6 +17,12 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
@@ -24,6 +34,12 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
@@ -37,6 +53,12 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
@@ -48,6 +70,12 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
@@ -61,6 +89,12 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
@@ -72,6 +106,12 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
@@ -85,6 +125,12 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
@@ -96,6 +142,12 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
@@ -109,6 +161,12 @@ define <vscale x 16 x i8> @vp_abs_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
@@ -120,6 +178,12 @@ define <vscale x 16 x i8> @vp_abs_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
@@ -133,6 +197,12 @@ define <vscale x 32 x i8> @vp_abs_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
@@ -144,6 +214,12 @@ define <vscale x 32 x i8> @vp_abs_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
@@ -157,6 +233,12 @@ define <vscale x 64 x i8> @vp_abs_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv64i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
@@ -168,6 +250,12 @@ define <vscale x 64 x i8> @vp_abs_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv64i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
@@ -181,6 +269,12 @@ define <vscale x 1 x i16> @vp_abs_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
@@ -192,6 +286,12 @@ define <vscale x 1 x i16> @vp_abs_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
@@ -205,6 +305,12 @@ define <vscale x 2 x i16> @vp_abs_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
@@ -216,6 +322,12 @@ define <vscale x 2 x i16> @vp_abs_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
@@ -229,6 +341,12 @@ define <vscale x 4 x i16> @vp_abs_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
@@ -240,6 +358,12 @@ define <vscale x 4 x i16> @vp_abs_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
@@ -253,6 +377,12 @@ define <vscale x 8 x i16> @vp_abs_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
@@ -264,6 +394,12 @@ define <vscale x 8 x i16> @vp_abs_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
@@ -277,6 +413,12 @@ define <vscale x 16 x i16> @vp_abs_nxv16i16(<vscale x 16 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
@@ -288,6 +430,12 @@ define <vscale x 16 x i16> @vp_abs_nxv16i16_unmasked(<vscale x 16 x i16> %va, i3
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
@@ -301,6 +449,12 @@ define <vscale x 32 x i16> @vp_abs_nxv32i16(<vscale x 32 x i16> %va, <vscale x 3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
@@ -312,6 +466,12 @@ define <vscale x 32 x i16> @vp_abs_nxv32i16_unmasked(<vscale x 32 x i16> %va, i3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
@@ -325,6 +485,12 @@ define <vscale x 1 x i32> @vp_abs_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
@@ -336,6 +502,12 @@ define <vscale x 1 x i32> @vp_abs_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
@@ -349,6 +521,12 @@ define <vscale x 2 x i32> @vp_abs_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
@@ -360,6 +538,12 @@ define <vscale x 2 x i32> @vp_abs_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
@@ -373,6 +557,12 @@ define <vscale x 4 x i32> @vp_abs_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
@@ -384,6 +574,12 @@ define <vscale x 4 x i32> @vp_abs_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
@@ -397,6 +593,12 @@ define <vscale x 8 x i32> @vp_abs_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
@@ -408,6 +610,12 @@ define <vscale x 8 x i32> @vp_abs_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
@@ -421,6 +629,12 @@ define <vscale x 16 x i32> @vp_abs_nxv16i32(<vscale x 16 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
@@ -432,6 +646,12 @@ define <vscale x 16 x i32> @vp_abs_nxv16i32_unmasked(<vscale x 16 x i32> %va, i3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
@@ -445,6 +665,12 @@ define <vscale x 1 x i64> @vp_abs_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
@@ -456,6 +682,12 @@ define <vscale x 1 x i64> @vp_abs_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
@@ -469,6 +701,12 @@ define <vscale x 2 x i64> @vp_abs_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
@@ -480,6 +718,12 @@ define <vscale x 2 x i64> @vp_abs_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
@@ -493,6 +737,12 @@ define <vscale x 4 x i64> @vp_abs_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
@@ -504,6 +754,12 @@ define <vscale x 4 x i64> @vp_abs_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
@@ -517,6 +773,12 @@ define <vscale x 7 x i64> @vp_abs_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv7i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x i64> %v
 }
@@ -528,6 +790,12 @@ define <vscale x 7 x i64> @vp_abs_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv7i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
@@ -541,6 +809,12 @@ define <vscale x 8 x i64> @vp_abs_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
@@ -552,6 +826,12 @@ define <vscale x 8 x i64> @vp_abs_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
@@ -582,6 +862,28 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vrsub.vi v24, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v24, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; ZVABD-NEXT:    vmv1r.v v24, v0
+; ZVABD-NEXT:    csrr a1, vlenb
+; ZVABD-NEXT:    srli a2, a1, 3
+; ZVABD-NEXT:    sub a3, a0, a1
+; ZVABD-NEXT:    vslidedown.vx v0, v0, a2
+; ZVABD-NEXT:    sltu a2, a0, a3
+; ZVABD-NEXT:    addi a2, a2, -1
+; ZVABD-NEXT:    and a2, a2, a3
+; ZVABD-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16, v0.t
+; ZVABD-NEXT:    bltu a0, a1, .LBB46_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    mv a0, a1
+; ZVABD-NEXT:  .LBB46_2:
+; ZVABD-NEXT:    vmv1r.v v0, v24
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i64> %v
 }
@@ -605,6 +907,23 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64_unmasked(<vscale x 16 x i64> %va, i3
 ; CHECK-NEXT:    vrsub.vi v24, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v24
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    csrr a1, vlenb
+; ZVABD-NEXT:    sub a2, a0, a1
+; ZVABD-NEXT:    sltu a3, a0, a2
+; ZVABD-NEXT:    addi a3, a3, -1
+; ZVABD-NEXT:    and a2, a3, a2
+; ZVABD-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16
+; ZVABD-NEXT:    bltu a0, a1, .LBB47_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    mv a0, a1
+; ZVABD-NEXT:  .LBB47_2:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
index f61c17e18b022..a17f16c36ec5a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -628,8 +628,7 @@ define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <16 x i8> %a, %b
   %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
@@ -650,8 +649,7 @@ define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <8 x i16> %a, %b
   %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
@@ -672,8 +670,7 @@ define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <4 x i32> %a, %b
   %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
@@ -694,8 +691,7 @@ define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <2 x i64> %a, %b
   %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
index 84da351de76ba..2228f74bd20c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
@@ -3,6 +3,10 @@
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+experimental-zvabd -target-abi=ilp32d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+experimental-zvabd -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
 
 declare <2 x i8> @llvm.vp.abs.v2i8(<2 x i8>, i1 immarg, <2 x i1>, i32)
 
@@ -13,6 +17,12 @@ define <2 x i8> @vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
@@ -24,6 +34,12 @@ define <2 x i8> @vp_abs_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
@@ -37,6 +53,12 @@ define <4 x i8> @vp_abs_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
@@ -48,6 +70,12 @@ define <4 x i8> @vp_abs_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
@@ -61,6 +89,12 @@ define <8 x i8> @vp_abs_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
@@ -72,6 +106,12 @@ define <8 x i8> @vp_abs_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
@@ -85,6 +125,12 @@ define <16 x i8> @vp_abs_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
@@ -96,6 +142,12 @@ define <16 x i8> @vp_abs_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
@@ -109,6 +161,12 @@ define <2 x i16> @vp_abs_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
@@ -120,6 +178,12 @@ define <2 x i16> @vp_abs_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
@@ -133,6 +197,12 @@ define <4 x i16> @vp_abs_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
@@ -144,6 +214,12 @@ define <4 x i16> @vp_abs_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
@@ -157,6 +233,12 @@ define <8 x i16> @vp_abs_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
@@ -168,6 +250,12 @@ define <8 x i16> @vp_abs_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
@@ -181,6 +269,12 @@ define <16 x i16> @vp_abs_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
@@ -192,6 +286,12 @@ define <16 x i16> @vp_abs_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
@@ -205,6 +305,12 @@ define <2 x i32> @vp_abs_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
@@ -216,6 +322,12 @@ define <2 x i32> @vp_abs_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
@@ -229,6 +341,12 @@ define <4 x i32> @vp_abs_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
@@ -240,6 +358,12 @@ define <4 x i32> @vp_abs_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
@@ -253,6 +377,12 @@ define <8 x i32> @vp_abs_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
@@ -264,6 +394,12 @@ define <8 x i32> @vp_abs_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
@@ -277,6 +413,12 @@ define <16 x i32> @vp_abs_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
@@ -288,6 +430,12 @@ define <16 x i32> @vp_abs_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
@@ -301,6 +449,12 @@ define <2 x i64> @vp_abs_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
@@ -312,6 +466,12 @@ define <2 x i64> @vp_abs_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
@@ -325,6 +485,12 @@ define <4 x i64> @vp_abs_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
@@ -336,6 +502,12 @@ define <4 x i64> @vp_abs_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
@@ -349,6 +521,12 @@ define <8 x i64> @vp_abs_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
@@ -360,6 +538,12 @@ define <8 x i64> @vp_abs_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
@@ -373,6 +557,12 @@ define <15 x i64> @vp_abs_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v15i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> %m, i32 %evl)
   ret <15 x i64> %v
 }
@@ -384,6 +574,12 @@ define <15 x i64> @vp_abs_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v15i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
@@ -397,6 +593,12 @@ define <16 x i64> @vp_abs_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
@@ -408,6 +610,12 @@ define <16 x i64> @vp_abs_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
@@ -437,6 +645,27 @@ define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v24, v16, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v16, v16, v24, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v32i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    li a2, 16
+; ZVABD-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; ZVABD-NEXT:    vslidedown.vi v24, v0, 2
+; ZVABD-NEXT:    mv a1, a0
+; ZVABD-NEXT:    bltu a0, a2, .LBB34_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    li a1, 16
+; ZVABD-NEXT:  .LBB34_2:
+; ZVABD-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    addi a1, a0, -16
+; ZVABD-NEXT:    sltu a0, a0, a1
+; ZVABD-NEXT:    addi a0, a0, -1
+; ZVABD-NEXT:    and a0, a0, a1
+; ZVABD-NEXT:    vmv1r.v v0, v24
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16, v0.t
+; ZVABD-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
@@ -461,6 +690,24 @@ define <32 x i64> @vp_abs_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v24, v16, 0
 ; CHECK-NEXT:    vmax.vv v16, v16, v24
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v32i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    li a2, 16
+; ZVABD-NEXT:    mv a1, a0
+; ZVABD-NEXT:    bltu a0, a2, .LBB35_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    li a1, 16
+; ZVABD-NEXT:  .LBB35_2:
+; ZVABD-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    addi a1, a0, -16
+; ZVABD-NEXT:    sltu a0, a0, a1
+; ZVABD-NEXT:    addi a0, a0, -1
+; ZVABD-NEXT:    and a0, a0, a1
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16
+; ZVABD-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
index 2356237d790b6..2efced6a50f73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -1,6 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
 
 define void @abs_v16i8(ptr %x) {
 ; CHECK-LABEL: abs_v16i8:
@@ -11,6 +15,14 @@ define void @abs_v16i8(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse8.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <16 x i8>, ptr %x
   %b = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false)
   store <16 x i8> %b, ptr %x
@@ -27,6 +39,14 @@ define void @abs_v8i16(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse16.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <8 x i16>, ptr %x
   %b = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false)
   store <8 x i16> %b, ptr %x
@@ -43,6 +63,14 @@ define void @abs_v6i16(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v6i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse16.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <6 x i16>, ptr %x
   %b = call <6 x i16> @llvm.abs.v6i16(<6 x i16> %a, i1 false)
   store <6 x i16> %b, ptr %x
@@ -59,6 +87,14 @@ define void @abs_v4i32(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vle32.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse32.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i32>, ptr %x
   %b = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false)
   store <4 x i32> %b, ptr %x
@@ -75,6 +111,14 @@ define void @abs_v2i64(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vle64.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <2 x i64>, ptr %x
   %b = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false)
   store <2 x i64> %b, ptr %x
@@ -92,6 +136,15 @@ define void @abs_v32i8(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v32i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    li a1, 32
+; ZVABD-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse8.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <32 x i8>, ptr %x
   %b = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false)
   store <32 x i8> %b, ptr %x
@@ -108,6 +161,14 @@ define void @abs_v16i16(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse16.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <16 x i16>, ptr %x
   %b = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false)
   store <16 x i16> %b, ptr %x
@@ -124,6 +185,14 @@ define void @abs_v8i32(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; ZVABD-NEXT:    vle32.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse32.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <8 x i32>, ptr %x
   %b = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false)
   store <8 x i32> %b, ptr %x
@@ -140,6 +209,14 @@ define void @abs_v4i64(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; ZVABD-NEXT:    vle64.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i64>, ptr %x
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
   store <4 x i64> %b, ptr %x
@@ -158,6 +235,16 @@ define void @abs_v4i64_of_sext_v4i8(ptr %x) {
 ; CHECK-NEXT:    vzext.vf8 v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf8 v8, v10
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i8>, ptr %x
   %a.ext = sext <4 x i8> %a to <4 x i64>
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
@@ -176,6 +263,16 @@ define void @abs_v4i64_of_sext_v4i16(ptr %x) {
 ; CHECK-NEXT:    vzext.vf4 v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v8, v10
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i16>, ptr %x
   %a.ext = sext <4 x i16> %a to <4 x i64>
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
@@ -194,6 +291,16 @@ define void @abs_v4i64_of_sext_v4i32(ptr %x) {
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vle32.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i32>, ptr %x
   %a.ext = sext <4 x i32> %a to <4 x i64>
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)

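The updated abs and vp.abs tests above all collapse the generic vrsub.vi + vmax.vv absolute-value expansion into a single vabs.v once Zvabd is enabled. For a quick way to see the same thing outside the checked-in tests, a minimal standalone input behaves the same way (illustrative sketch only; the file name, function name and invocation below are not part of the patch):

  ; abs_sketch.ll - hypothetical file, not one of the patch's tests
  declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)

  define <4 x i32> @abs_sketch(<4 x i32> %x) {
    %r = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %x, i1 false)
    ret <4 x i32> %r
  }

  $ llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd < abs_sketch.ll

With the feature enabled this should select vabs.v for the llvm.abs call, matching the ZVABD check lines above; without it, the existing vrsub.vi/vmax.vv sequence is kept.
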
>From 79b1697581c138659b96b001f187f072854f5425 Mon Sep 17 00:00:00 2001
From: Wang Pengcheng <wangpengcheng.pp at bytedance.com>
Date: Thu, 6 Feb 2025 16:16:04 +0800
Subject: [PATCH 4/4] [RISCV][CodeGen] Combine vwaddu+vabd(u) to vwabdacc(u)

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 40 +++++++++++++++++++
 .../Target/RISCV/RISCVInstrInfoVVLPatterns.td | 13 +++++-
 llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td  | 19 ++++++++-
 .../CodeGen/RISCV/rvv/fixed-vectors-sad.ll    | 12 +++---
 4 files changed, 77 insertions(+), 7 deletions(-)

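As a rough sketch of the DAG shape this combine targets (illustrative only: the IR below is not one of the patch's tests, the names are made up, and whether the unsigned absolute-difference node is actually formed from this exact umax/umin/sub source depends on the generic combines in effect):

  ; wabdaccu_sketch.ll - hypothetical input, not part of the patch
  declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
  declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)

  define <16 x i16> @wabdaccu_sketch(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c) {
    ; |a - b| without overflow: the shape that can become vabdu (ABDU_VL)
    %max = call <16 x i8> @llvm.umax.v16i8(<16 x i8> %a, <16 x i8> %b)
    %min = call <16 x i8> @llvm.umin.v16i8(<16 x i8> %a, <16 x i8> %b)
    %abd = sub <16 x i8> %max, %min
    ; zext + add of two narrow values is what the backend turns into the
    ; widening add (VWADDU_VL) that this combine then looks at
    %abd.wide = zext <16 x i8> %abd to <16 x i16>
    %c.wide = zext <16 x i8> %c to <16 x i16>
    %sum = add <16 x i16> %c.wide, %abd.wide
    ret <16 x i16> %sum
  }

When one operand of the VWADDU_VL is the absolute difference and the other is the accumulator C, the pair folds into a single vwabdaccu, as the fixed-vectors-sad.ll diff below shows on real SAD code.
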
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 048fade110cd4..593a606225687 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17729,6 +17729,44 @@ static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG) {
                      N->getFlags());
 }
 
+// vwaddu C (vabd A B) -> vwabdacc(A B C)
+// vwaddu C (vabdu A B) -> vwabdaccu(A B C)
+static SDValue performVWABDACCCombine(SDNode *N, SelectionDAG &DAG,
+                                      const RISCVSubtarget &Subtarget) {
+  if (!Subtarget.hasStdExtZvabd())
+    return SDValue();
+
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  SDValue Passthru = N->getOperand(2);
+  if (!Passthru->isUndef())
+    return SDValue();
+
+  SDValue Mask = N->getOperand(3);
+  SDValue VL = N->getOperand(4);
+  auto IsABD = [](SDValue Op) {
+    if (Op->getOpcode() != RISCVISD::ABDS_VL &&
+        Op->getOpcode() != RISCVISD::ABDU_VL)
+      return SDValue();
+    return Op;
+  };
+
+  SDValue Diff = IsABD(Op0);
+  Diff = Diff ? Diff : IsABD(Op1);
+  if (!Diff)
+    return SDValue();
+  SDValue Acc = Diff == Op0 ? Op1 : Op0;
+
+  SDLoc DL(N);
+  MVT VT = N->getSimpleValueType(0);
+  Acc = DAG.getNode(RISCVISD::VZEXT_VL, DL, VT, Acc, Mask, VL);
+  SDValue Result = DAG.getNode(
+      Diff.getOpcode() == RISCVISD::ABDS_VL ? RISCVISD::VWABDACC_VL
+                                            : RISCVISD::VWABDACCU_VL,
+      DL, VT, Diff.getOperand(0), Diff.getOperand(1), Acc, Mask, VL);
+  return Result;
+}
+
 static SDValue performVWADDSUBW_VLCombine(SDNode *N,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const RISCVSubtarget &Subtarget) {
@@ -20607,6 +20645,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     if (SDValue V = combineVqdotAccum(N, DAG, Subtarget))
       return V;
     return combineToVWMACC(N, DAG, Subtarget);
+  case RISCVISD::VWADDU_VL:
+    return performVWABDACCCombine(N, DAG, Subtarget);
   case RISCVISD::VWADD_W_VL:
   case RISCVISD::VWADDU_W_VL:
   case RISCVISD::VWSUB_W_VL:
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index fddbb5ef1f45d..5eecc27285efd 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1808,7 +1808,7 @@ multiclass VPatMultiplyAccVL_VV_VX<PatFrag op, string instruction_name> {
   }
 }
 
-multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> {
+multiclass VPatWidenMultiplyAddVL_VV<SDNode vwmacc_op, string instr_name> {
   foreach vtiTowti = AllWidenableIntVectors in {
     defvar vti = vtiTowti.Vti;
     defvar wti = vtiTowti.Wti;
@@ -1821,6 +1821,17 @@ multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name> {
                 (!cast<Instruction>(instr_name#"_VV_"#vti.LMul.MX#"_MASK")
                     wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                     (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+    }
+  }
+}
+
+multiclass VPatWidenMultiplyAddVL_VV_VX<SDNode vwmacc_op, string instr_name>
+    : VPatWidenMultiplyAddVL_VV<vwmacc_op, instr_name> {
+  foreach vtiTowti = AllWidenableIntVectors in {
+    defvar vti = vtiTowti.Vti;
+    defvar wti = vtiTowti.Wti;
+    let Predicates = !listconcat(GetVTypePredicates<vti>.Predicates,
+                                 GetVTypePredicates<wti>.Predicates) in {
       def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1),
                            (vti.Vector vti.RegClass:$rs2),
                            (wti.Vector wti.RegClass:$rd),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 512bb78f933f0..eb2c2ff92089f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -29,7 +29,6 @@ let Predicates = [HasStdExtZvabd] in {
 //===----------------------------------------------------------------------===//
 // Pseudos
 //===----------------------------------------------------------------------===//
-
 multiclass PseudoVABS {
   foreach m = MxList in {
     defvar mx = m.MX;
@@ -44,10 +43,23 @@ multiclass PseudoVABS {
   }
 }
 
+multiclass VPseudoVWABD_VV {
+  foreach m = MxListW in {
+    defvar mx = m.MX;
+    defm "" : VPseudoTernaryW_VV<m, Commutable = 1>,
+              SchedTernary<"WriteVIWMulAddV", "ReadVIWMulAddV",
+                           "ReadVIWMulAddV", "ReadVIWMulAddV", mx>;
+  }
+}
+
 let Predicates = [HasStdExtZvabd] in {
   defm PseudoVABS : PseudoVABS;
   defm PseudoVABD : VPseudoVALU_VV<Commutable = 1>;
   defm PseudoVABDU : VPseudoVALU_VV<Commutable = 1>;
+  let IsRVVWideningReduction = 1 in {
+    defm PseudoVWABDACC : VPseudoVWABD_VV;
+    defm PseudoVWABDACCU : VPseudoVWABD_VV;
+  } // IsRVVWideningReduction = 1
 } // Predicates = [HasStdExtZvabd]
 
 //===----------------------------------------------------------------------===//
@@ -57,6 +69,8 @@ let HasPassthruOp = true, HasMaskOp = true in {
 def riscv_abs_vl  : RVSDNode<"ABS_VL", SDT_RISCVIntUnOp_VL>;
 def riscv_abds_vl : RVSDNode<"ABDS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 def riscv_abdu_vl : RVSDNode<"ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def rvv_vwabdacc_vl  : RVSDNode<"VWABDACC_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
+def rvv_vwabdaccu_vl : RVSDNode<"VWABDACCU_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>;
 } // let HasPassthruOp = true, HasMaskOp = true
 
 let Predicates = [HasStdExtZvabd] in {
@@ -74,4 +88,7 @@ foreach vti = AllIntegerVectors in {
 }
 
 defm : VPatUnaryVL_V<riscv_abs_vl, "PseudoVABS", HasStdExtZvabd>;
+
+defm : VPatWidenMultiplyAddVL_VV<rvv_vwabdacc_vl, "PseudoVWABDACC">;
+defm : VPatWidenMultiplyAddVL_VV<rvv_vwabdaccu_vl, "PseudoVWABDACCU">;
 } // Predicates = [HasStdExtZvabd]
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
index a3d6ebd2042c5..796f9b63883c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -199,16 +199,18 @@ define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea
 ; ZVABD-NEXT:    vle8.v v15, (a1)
 ; ZVABD-NEXT:    add a0, a0, a2
 ; ZVABD-NEXT:    add a1, a1, a3
-; ZVABD-NEXT:    vabdu.vv v8, v8, v9
-; ZVABD-NEXT:    vle8.v v9, (a0)
+; ZVABD-NEXT:    vle8.v v16, (a0)
+; ZVABD-NEXT:    vle8.v v17, (a1)
 ; ZVABD-NEXT:    vabdu.vv v10, v10, v11
-; ZVABD-NEXT:    vle8.v v11, (a1)
-; ZVABD-NEXT:    vwaddu.vv v12, v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v12, v10
+; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
+; ZVABD-NEXT:    vwabdaccu.vv v12, v8, v9
 ; ZVABD-NEXT:    vabdu.vv v8, v14, v15
 ; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v14, v8
 ; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; ZVABD-NEXT:    vabdu.vv v16, v9, v11
+; ZVABD-NEXT:    vabdu.vv v16, v16, v17
 ; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVABD-NEXT:    vwaddu.vv v8, v14, v12
 ; ZVABD-NEXT:    vzext.vf2 v12, v16
