[llvm] 972e73b - [RISCV][CodeGen] Lower `ISD::ABS` to Zvabd instructions

via llvm-commits llvm-commits at lists.llvm.org
Sun Feb 8 23:21:30 PST 2026


Author: Pengcheng Wang
Date: 2026-02-09T15:21:25+08:00
New Revision: 972e73b812cb7b6dd349c7c07daae73314f29e8f

URL: https://github.com/llvm/llvm-project/commit/972e73b812cb7b6dd349c7c07daae73314f29e8f
DIFF: https://github.com/llvm/llvm-project/commit/972e73b812cb7b6dd349c7c07daae73314f29e8f.diff

LOG: [RISCV][CodeGen] Lower `ISD::ABS` to Zvabd instructions

We add pseudos/patterns for the `vabs.v` instruction and handle the
lowering in `RISCVTargetLowering::lowerABS`.
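
For example, a scalable-vector `llvm.abs` call like the one below
(mirroring the updated abs-sdnode.ll tests) is now selected to a single
`vabs.v` instead of the `vrsub.vi` + `vmax.vv` sequence when compiled
with `-mattr=+v,+experimental-zvabd`:

  define <vscale x 4 x i16> @vabs_nxv4i16(<vscale x 4 x i16> %v) {
    %r = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %v, i1 false)
    ret <vscale x 4 x i16> %r
  }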

Reviewers: topperc, 4vtomat, mshockwave, preames, lukel97, tclin914

Reviewed By: mshockwave

Pull Request: https://github.com/llvm/llvm-project/pull/180142

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
    llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 81f16ae6209e3..9b88bc5c39ce4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1006,6 +1006,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          Legal);
 
       if (Subtarget.hasStdExtZvabd()) {
+        setOperationAction(ISD::ABS, VT, Legal);
         // Only SEW=8/16 are supported in Zvabd.
         if (VT.getVectorElementType() == MVT::i8 ||
             VT.getVectorElementType() == MVT::i16)
@@ -13770,17 +13771,22 @@ SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
   } else
     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
 
-  SDValue SplatZero = DAG.getNode(
-      RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
-      DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
-  SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
-                             DAG.getUNDEF(ContainerVT), Mask, VL);
-  SDValue Max = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
-                            DAG.getUNDEF(ContainerVT), Mask, VL);
-
+  SDValue Result;
+  if (Subtarget.hasStdExtZvabd()) {
+    Result = DAG.getNode(RISCVISD::ABS_VL, DL, ContainerVT, X,
+                         DAG.getUNDEF(ContainerVT), Mask, VL);
+  } else {
+    SDValue SplatZero = DAG.getNode(
+        RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
+        DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
+    SDValue NegX = DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X,
+                               DAG.getUNDEF(ContainerVT), Mask, VL);
+    Result = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX,
+                         DAG.getUNDEF(ContainerVT), Mask, VL);
+  }
   if (VT.isFixedLengthVector())
-    Max = convertFromScalableVector(VT, Max, DAG, Subtarget);
-  return Max;
+    Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
+  return Result;
 }
 
 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 46b1cefcf6dc0..46dd45876a384 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1955,6 +1955,26 @@ multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
   }
 }
 
+multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
+                         Predicate predicate = HasStdExtZvbb> {
+  foreach vti = AllIntegerVectors in {
+    let Predicates = !listconcat([predicate],
+                                 GetVTypePredicates<vti>.Predicates) in {
+      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
+                                (vti.Vector vti.RegClass:$passthru),
+                                (vti.Mask VMV0:$vm),
+                                VLOpFrag)),
+                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
+                   vti.RegClass:$passthru,
+                   vti.RegClass:$rs1,
+                   (vti.Mask VMV0:$vm),
+                   GPR:$vl,
+                   vti.Log2SEW,
+                   TAIL_AGNOSTIC)>;
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
index 9cd1415c555b6..9c2d41e1aaa26 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvabd.td
@@ -29,7 +29,23 @@ let Predicates = [HasStdExtZvabd] in {
 //===----------------------------------------------------------------------===//
 // Pseudos
 //===----------------------------------------------------------------------===//
+
+multiclass PseudoVABS {
+  foreach m = MxList in {
+    defvar mx = m.MX;
+    let VLMul = m.value in {
+      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
+                       SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+      def "_V_" # mx # "_MASK" :
+        VPseudoUnaryMask<m.vrclass, m.vrclass>,
+        RISCVMaskedPseudo<MaskIdx=2>,
+        SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+    }
+  }
+}
+
 let Predicates = [HasStdExtZvabd] in {
+  defm PseudoVABS : PseudoVABS;
   defm PseudoVABD : VPseudoVALU_VV<Commutable = 1>;
   defm PseudoVABDU : VPseudoVALU_VV<Commutable = 1>;
 } // Predicates = [HasStdExtZvabd]
@@ -38,6 +54,7 @@ let Predicates = [HasStdExtZvabd] in {
 // CodeGen Patterns
 //===----------------------------------------------------------------------===//
 let HasPassthruOp = true, HasMaskOp = true in {
+def riscv_abs_vl  : RVSDNode<"ABS_VL", SDT_RISCVIntUnOp_VL>;
 def riscv_abds_vl : RVSDNode<"ABDS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 def riscv_abdu_vl : RVSDNode<"ABDU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
 } // let HasPassthruOp = true, HasMaskOp = true
@@ -53,4 +70,13 @@ defm : VPatBinarySDNode_VV<abdu, "PseudoVABDU", ABDIntVectors>;
 
 defm : VPatBinaryVL_VV<riscv_abds_vl, "PseudoVABD", ABDIntVectors>;
 defm : VPatBinaryVL_VV<riscv_abdu_vl, "PseudoVABDU", ABDIntVectors>;
+
+foreach vti = AllIntegerVectors in {
+  def : Pat<(vti.Vector (abs (vti.Vector vti.RegClass:$rs2))),
+            (!cast<Instruction>("PseudoVABS_V_"#vti.LMul.MX)
+                    (vti.Vector (IMPLICIT_DEF)),
+                    vti.RegClass:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
+}
+
+defm : VPatUnaryVL_V<riscv_abs_vl, "PseudoVABS", HasStdExtZvabd>;
 } // Predicates = [HasStdExtZvabd]

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 2b94de914b995..3a5ddb8b2b994 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -719,26 +719,6 @@ defm : VPatBinarySDNode_VV_VX<clmulh, "PseudoVCLMULH", I64IntegerVectors, ExtraP
 // VL patterns
 //===----------------------------------------------------------------------===//
 
-multiclass VPatUnaryVL_V<SDPatternOperator op, string instruction_name,
-                         Predicate predicate = HasStdExtZvbb> {
-  foreach vti = AllIntegerVectors in {
-    let Predicates = !listconcat([predicate],
-                                 GetVTypePredicates<vti>.Predicates) in {
-      def : Pat<(vti.Vector (op (vti.Vector vti.RegClass:$rs1),
-                                (vti.Vector vti.RegClass:$passthru),
-                                (vti.Mask VMV0:$vm),
-                                VLOpFrag)),
-                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
-                   vti.RegClass:$passthru,
-                   vti.RegClass:$rs1,
-                   (vti.Mask VMV0:$vm),
-                   GPR:$vl,
-                   vti.Log2SEW,
-                   TAIL_AGNOSTIC)>;
-    }
-  }
-}
-
 foreach vti = AllIntegerVectors in {
   let Predicates = !listconcat([HasStdExtZvkb],
                                GetVTypePredicates<vti>.Predicates) in {

diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
index 7260cca64a476..868e6766fda00 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-sdnode.ll
@@ -1,6 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
 
 define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
 ; CHECK-LABEL: vabs_nxv1i16:
@@ -9,6 +13,12 @@ define <vscale x 1 x i16> @vabs_nxv1i16(<vscale x 1 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i16> @llvm.abs.nxv1i16(<vscale x 1 x i16> %v, i1 false)
   ret <vscale x 1 x i16> %r
 }
@@ -20,6 +30,12 @@ define <vscale x 2 x i16> @vabs_nxv2i16(<vscale x 2 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i16> @llvm.abs.nxv2i16(<vscale x 2 x i16> %v, i1 false)
   ret <vscale x 2 x i16> %r
 }
@@ -31,6 +47,12 @@ define <vscale x 4 x i16> @vabs_nxv4i16(<vscale x 4 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i16> @llvm.abs.nxv4i16(<vscale x 4 x i16> %v, i1 false)
   ret <vscale x 4 x i16> %r
 }
@@ -42,6 +64,12 @@ define <vscale x 8 x i16> @vabs_nxv8i16(<vscale x 8 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %v, i1 false)
   ret <vscale x 8 x i16> %r
 }
@@ -53,6 +81,12 @@ define <vscale x 16 x i16> @vabs_nxv16i16(<vscale x 16 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %v, i1 false)
   ret <vscale x 16 x i16> %r
 }
@@ -64,6 +98,12 @@ define <vscale x 32 x i16> @vabs_nxv32i16(<vscale x 32 x i16> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv32i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 32 x i16> @llvm.abs.nxv32i16(<vscale x 32 x i16> %v, i1 false)
   ret <vscale x 32 x i16> %r
 }
@@ -75,6 +115,12 @@ define <vscale x 1 x i32> @vabs_nxv1i32(<vscale x 1 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i32> @llvm.abs.nxv1i32(<vscale x 1 x i32> %v, i1 false)
   ret <vscale x 1 x i32> %r
 }
@@ -86,6 +132,12 @@ define <vscale x 2 x i32> @vabs_nxv2i32(<vscale x 2 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i32> @llvm.abs.nxv2i32(<vscale x 2 x i32> %v, i1 false)
   ret <vscale x 2 x i32> %r
 }
@@ -97,6 +149,12 @@ define <vscale x 4 x i32> @vabs_nxv4i32(<vscale x 4 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %v, i1 false)
   ret <vscale x 4 x i32> %r
 }
@@ -108,6 +166,12 @@ define <vscale x 8 x i32> @vabs_nxv8i32(<vscale x 8 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %v, i1 false)
   ret <vscale x 8 x i32> %r
 }
@@ -119,6 +183,12 @@ define <vscale x 16 x i32> @vabs_nxv16i32(<vscale x 16 x i32> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv16i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 16 x i32> @llvm.abs.nxv16i32(<vscale x 16 x i32> %v, i1 false)
   ret <vscale x 16 x i32> %r
 }
@@ -130,6 +200,12 @@ define <vscale x 1 x i64> @vabs_nxv1i64(<vscale x 1 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv1i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 1 x i64> @llvm.abs.nxv1i64(<vscale x 1 x i64> %v, i1 false)
   ret <vscale x 1 x i64> %r
 }
@@ -141,6 +217,12 @@ define <vscale x 2 x i64> @vabs_nxv2i64(<vscale x 2 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %v, i1 false)
   ret <vscale x 2 x i64> %r
 }
@@ -152,6 +234,12 @@ define <vscale x 4 x i64> @vabs_nxv4i64(<vscale x 4 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %v, i1 false)
   ret <vscale x 4 x i64> %r
 }
@@ -163,6 +251,12 @@ define <vscale x 8 x i64> @vabs_nxv8i64(<vscale x 8 x i64> %v) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vabs_nxv8i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %r = call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> %v, i1 false)
   ret <vscale x 8 x i64> %r
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index 5b215c5173211..684c9abb37353 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -3,6 +3,10 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+experimental-zvabd -target-abi=ilp32d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+experimental-zvabd -target-abi=lp64d \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
 
 define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_abs_nxv1i8:
@@ -11,6 +15,12 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %v
 }
@@ -22,6 +32,12 @@ define <vscale x 1 x i8> @vp_abs_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i8> @llvm.vp.abs.nxv1i8(<vscale x 1 x i8> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i8> %v
 }
@@ -33,6 +49,12 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %v
 }
@@ -44,6 +66,12 @@ define <vscale x 2 x i8> @vp_abs_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i8> %v
 }
@@ -55,6 +83,12 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %v
 }
@@ -66,6 +100,12 @@ define <vscale x 4 x i8> @vp_abs_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i8> %v
 }
@@ -77,6 +117,12 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1>
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %v
 }
@@ -88,6 +134,12 @@ define <vscale x 8 x i8> @vp_abs_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zero
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i8> %v
 }
@@ -99,6 +151,12 @@ define <vscale x 16 x i8> @vp_abs_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i8> %v
 }
@@ -110,6 +168,12 @@ define <vscale x 16 x i8> @vp_abs_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i8> %v
 }
@@ -121,6 +185,12 @@ define <vscale x 32 x i8> @vp_abs_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i8> %v
 }
@@ -132,6 +202,12 @@ define <vscale x 32 x i8> @vp_abs_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i8> @llvm.vp.abs.nxv32i8(<vscale x 32 x i8> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i8> %v
 }
@@ -143,6 +219,12 @@ define <vscale x 64 x i8> @vp_abs_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv64i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> %m, i32 %evl)
   ret <vscale x 64 x i8> %v
 }
@@ -154,6 +236,12 @@ define <vscale x 64 x i8> @vp_abs_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv64i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 64 x i8> @llvm.vp.abs.nxv64i8(<vscale x 64 x i8> %va, i1 false, <vscale x 64 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 64 x i8> %v
 }
@@ -165,6 +253,12 @@ define <vscale x 1 x i16> @vp_abs_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %v
 }
@@ -176,6 +270,12 @@ define <vscale x 1 x i16> @vp_abs_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i16> @llvm.vp.abs.nxv1i16(<vscale x 1 x i16> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i16> %v
 }
@@ -187,6 +287,12 @@ define <vscale x 2 x i16> @vp_abs_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %v
 }
@@ -198,6 +304,12 @@ define <vscale x 2 x i16> @vp_abs_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i16> @llvm.vp.abs.nxv2i16(<vscale x 2 x i16> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i16> %v
 }
@@ -209,6 +321,12 @@ define <vscale x 4 x i16> @vp_abs_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %v
 }
@@ -220,6 +338,12 @@ define <vscale x 4 x i16> @vp_abs_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i16> @llvm.vp.abs.nxv4i16(<vscale x 4 x i16> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i16> %v
 }
@@ -231,6 +355,12 @@ define <vscale x 8 x i16> @vp_abs_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %v
 }
@@ -242,6 +372,12 @@ define <vscale x 8 x i16> @vp_abs_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i16> @llvm.vp.abs.nxv8i16(<vscale x 8 x i16> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i16> %v
 }
@@ -253,6 +389,12 @@ define <vscale x 16 x i16> @vp_abs_nxv16i16(<vscale x 16 x i16> %va, <vscale x 1
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i16> %v
 }
@@ -264,6 +406,12 @@ define <vscale x 16 x i16> @vp_abs_nxv16i16_unmasked(<vscale x 16 x i16> %va, i3
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i16> @llvm.vp.abs.nxv16i16(<vscale x 16 x i16> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i16> %v
 }
@@ -275,6 +423,12 @@ define <vscale x 32 x i16> @vp_abs_nxv32i16(<vscale x 32 x i16> %va, <vscale x 3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v
 }
@@ -286,6 +440,12 @@ define <vscale x 32 x i16> @vp_abs_nxv32i16_unmasked(<vscale x 32 x i16> %va, i3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv32i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 32 x i16> @llvm.vp.abs.nxv32i16(<vscale x 32 x i16> %va, i1 false, <vscale x 32 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 32 x i16> %v
 }
@@ -297,6 +457,12 @@ define <vscale x 1 x i32> @vp_abs_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %v
 }
@@ -308,6 +474,12 @@ define <vscale x 1 x i32> @vp_abs_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i32> @llvm.vp.abs.nxv1i32(<vscale x 1 x i32> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i32> %v
 }
@@ -319,6 +491,12 @@ define <vscale x 2 x i32> @vp_abs_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %v
 }
@@ -330,6 +508,12 @@ define <vscale x 2 x i32> @vp_abs_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i32> @llvm.vp.abs.nxv2i32(<vscale x 2 x i32> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i32> %v
 }
@@ -341,6 +525,12 @@ define <vscale x 4 x i32> @vp_abs_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %v
 }
@@ -352,6 +542,12 @@ define <vscale x 4 x i32> @vp_abs_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i32> %v
 }
@@ -363,6 +559,12 @@ define <vscale x 8 x i32> @vp_abs_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %v
 }
@@ -374,6 +576,12 @@ define <vscale x 8 x i32> @vp_abs_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i32> @llvm.vp.abs.nxv8i32(<vscale x 8 x i32> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i32> %v
 }
@@ -385,6 +593,12 @@ define <vscale x 16 x i32> @vp_abs_nxv16i32(<vscale x 16 x i32> %va, <vscale x 1
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i32> %v
 }
@@ -396,6 +610,12 @@ define <vscale x 16 x i32> @vp_abs_nxv16i32_unmasked(<vscale x 16 x i32> %va, i3
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i32> @llvm.vp.abs.nxv16i32(<vscale x 16 x i32> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i32> %v
 }
@@ -407,6 +627,12 @@ define <vscale x 1 x i64> @vp_abs_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %v
 }
@@ -418,6 +644,12 @@ define <vscale x 1 x i64> @vp_abs_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv1i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 1 x i64> @llvm.vp.abs.nxv1i64(<vscale x 1 x i64> %va, i1 false, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x i64> %v
 }
@@ -429,6 +661,12 @@ define <vscale x 2 x i64> @vp_abs_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %v
 }
@@ -440,6 +678,12 @@ define <vscale x 2 x i64> @vp_abs_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv2i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> %va, i1 false, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x i64> %v
 }
@@ -451,6 +695,12 @@ define <vscale x 4 x i64> @vp_abs_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %v
 }
@@ -462,6 +712,12 @@ define <vscale x 4 x i64> @vp_abs_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv4i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> %va, i1 false, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x i64> %v
 }
@@ -473,6 +729,12 @@ define <vscale x 7 x i64> @vp_abs_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv7i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> %m, i32 %evl)
   ret <vscale x 7 x i64> %v
 }
@@ -484,6 +746,12 @@ define <vscale x 7 x i64> @vp_abs_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv7i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 7 x i64> @llvm.vp.abs.nxv7i64(<vscale x 7 x i64> %va, i1 false, <vscale x 7 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 7 x i64> %v
 }
@@ -495,6 +763,12 @@ define <vscale x 8 x i64> @vp_abs_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %v
 }
@@ -506,6 +780,12 @@ define <vscale x 8 x i64> @vp_abs_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 z
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv8i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> %va, i1 false, <vscale x 8 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 8 x i64> %v
 }
@@ -534,6 +814,28 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
 ; CHECK-NEXT:    vrsub.vi v24, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v24, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; ZVABD-NEXT:    vmv1r.v v24, v0
+; ZVABD-NEXT:    csrr a1, vlenb
+; ZVABD-NEXT:    srli a2, a1, 3
+; ZVABD-NEXT:    sub a3, a0, a1
+; ZVABD-NEXT:    vslidedown.vx v0, v0, a2
+; ZVABD-NEXT:    sltu a2, a0, a3
+; ZVABD-NEXT:    addi a2, a2, -1
+; ZVABD-NEXT:    and a2, a2, a3
+; ZVABD-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16, v0.t
+; ZVABD-NEXT:    bltu a0, a1, .LBB46_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    mv a0, a1
+; ZVABD-NEXT:  .LBB46_2:
+; ZVABD-NEXT:    vmv1r.v v0, v24
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
   ret <vscale x 16 x i64> %v
 }
@@ -557,6 +859,23 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64_unmasked(<vscale x 16 x i64> %va, i3
 ; CHECK-NEXT:    vrsub.vi v24, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v24
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_nxv16i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    csrr a1, vlenb
+; ZVABD-NEXT:    sub a2, a0, a1
+; ZVABD-NEXT:    sltu a3, a0, a2
+; ZVABD-NEXT:    addi a3, a3, -1
+; ZVABD-NEXT:    and a2, a3, a2
+; ZVABD-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16
+; ZVABD-NEXT:    bltu a0, a1, .LBB47_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    mv a0, a1
+; ZVABD-NEXT:  .LBB47_2:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 16 x i64> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
index 998668dc26bb8..6bfac12fa3b99 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abd.ll
@@ -644,8 +644,7 @@ define <16 x i8> @uabd_v16i8_nuw(<16 x i8> %a, <16 x i8> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <16 x i8> %a, %b
   %abs = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %sub, i1 true)
@@ -666,8 +665,7 @@ define <8 x i16> @uabd_v8i16_nuw(<8 x i16> %a, <8 x i16> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <8 x i16> %a, %b
   %abs = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %sub, i1 true)
@@ -688,8 +686,7 @@ define <4 x i32> @uabd_v4i32_nuw(<4 x i32> %a, <4 x i32> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <4 x i32> %a, %b
   %abs = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %sub, i1 true)
@@ -710,8 +707,7 @@ define <2 x i64> @uabd_v2i64_nuw(<2 x i64> %a, <2 x i64> %b) {
 ; ZVABD:       # %bb.0:
 ; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; ZVABD-NEXT:    vsub.vv v8, v8, v9
-; ZVABD-NEXT:    vrsub.vi v9, v8, 0
-; ZVABD-NEXT:    vmax.vv v8, v8, v9
+; ZVABD-NEXT:    vabs.v v8, v8
 ; ZVABD-NEXT:    ret
   %sub = sub nuw <2 x i64> %a, %b
   %abs = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %sub, i1 true)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
index fa81e1f6f3514..3a6dc2ba9b9e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
@@ -3,6 +3,10 @@
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m,+experimental-zvabd -target-abi=ilp32d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m,+experimental-zvabd -target-abi=lp64d \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVABD
 
 define <2 x i8> @vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_abs_v2i8:
@@ -11,6 +15,12 @@ define <2 x i8> @vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %v
 }
@@ -22,6 +32,12 @@ define <2 x i8> @vp_abs_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i8> %v
 }
@@ -33,6 +49,12 @@ define <4 x i8> @vp_abs_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %v
 }
@@ -44,6 +66,12 @@ define <4 x i8> @vp_abs_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i8> %v
 }
@@ -55,6 +83,12 @@ define <8 x i8> @vp_abs_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %v
 }
@@ -66,6 +100,12 @@ define <8 x i8> @vp_abs_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i8> %v
 }
@@ -77,6 +117,12 @@ define <16 x i8> @vp_abs_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i8> %v
 }
@@ -88,6 +134,12 @@ define <16 x i8> @vp_abs_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i8_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i8> %v
 }
@@ -99,6 +151,12 @@ define <2 x i16> @vp_abs_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %v
 }
@@ -110,6 +168,12 @@ define <2 x i16> @vp_abs_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i16> %v
 }
@@ -121,6 +185,12 @@ define <4 x i16> @vp_abs_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %v
 }
@@ -132,6 +202,12 @@ define <4 x i16> @vp_abs_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i16> %v
 }
@@ -143,6 +219,12 @@ define <8 x i16> @vp_abs_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %v
 }
@@ -154,6 +236,12 @@ define <8 x i16> @vp_abs_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i16> %v
 }
@@ -165,6 +253,12 @@ define <16 x i16> @vp_abs_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i16> %v
 }
@@ -176,6 +270,12 @@ define <16 x i16> @vp_abs_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i16_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i16> %v
 }
@@ -187,6 +287,12 @@ define <2 x i32> @vp_abs_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %v
 }
@@ -198,6 +304,12 @@ define <2 x i32> @vp_abs_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i32> %v
 }
@@ -209,6 +321,12 @@ define <4 x i32> @vp_abs_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %v
 }
@@ -220,6 +338,12 @@ define <4 x i32> @vp_abs_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i32> %v
 }
@@ -231,6 +355,12 @@ define <8 x i32> @vp_abs_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %v
 }
@@ -242,6 +372,12 @@ define <8 x i32> @vp_abs_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i32> %v
 }
@@ -253,6 +389,12 @@ define <16 x i32> @vp_abs_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i32> %v
 }
@@ -264,6 +406,12 @@ define <16 x i32> @vp_abs_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i32_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i32> %v
 }
@@ -275,6 +423,12 @@ define <2 x i64> @vp_abs_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %v
 }
@@ -286,6 +440,12 @@ define <2 x i64> @vp_abs_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v2i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, i1 false, <2 x i1> splat (i1 true), i32 %evl)
   ret <2 x i64> %v
 }
@@ -297,6 +457,12 @@ define <4 x i64> @vp_abs_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v10, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %v
 }
@@ -308,6 +474,12 @@ define <4 x i64> @vp_abs_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v4i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, i1 false, <4 x i1> splat (i1 true), i32 %evl)
   ret <4 x i64> %v
 }
@@ -319,6 +491,12 @@ define <8 x i64> @vp_abs_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v12, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %v
 }
@@ -330,6 +508,12 @@ define <8 x i64> @vp_abs_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v12, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v12
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v8i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, i1 false, <8 x i1> splat (i1 true), i32 %evl)
   ret <8 x i64> %v
 }
@@ -341,6 +525,12 @@ define <15 x i64> @vp_abs_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v15i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> %m, i32 %evl)
   ret <15 x i64> %v
 }
@@ -352,6 +542,12 @@ define <15 x i64> @vp_abs_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v15i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, i1 false, <15 x i1> splat (i1 true), i32 %evl)
   ret <15 x i64> %v
 }
@@ -363,6 +559,12 @@ define <16 x i64> @vp_abs_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v8, v8, v16, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    ret
   %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> %m, i32 %evl)
   ret <16 x i64> %v
 }
@@ -374,6 +576,12 @@ define <16 x i64> @vp_abs_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v16, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v16
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v16i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    ret
   %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, i1 false, <16 x i1> splat (i1 true), i32 %evl)
   ret <16 x i64> %v
 }
@@ -401,6 +609,27 @@ define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vrsub.vi v24, v16, 0, v0.t
 ; CHECK-NEXT:    vmax.vv v16, v16, v24, v0.t
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v32i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    li a2, 16
+; ZVABD-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; ZVABD-NEXT:    vslidedown.vi v24, v0, 2
+; ZVABD-NEXT:    mv a1, a0
+; ZVABD-NEXT:    bltu a0, a2, .LBB34_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    li a1, 16
+; ZVABD-NEXT:  .LBB34_2:
+; ZVABD-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8, v0.t
+; ZVABD-NEXT:    addi a1, a0, -16
+; ZVABD-NEXT:    sltu a0, a0, a1
+; ZVABD-NEXT:    addi a0, a0, -1
+; ZVABD-NEXT:    and a0, a0, a1
+; ZVABD-NEXT:    vmv1r.v v0, v24
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16, v0.t
+; ZVABD-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> %m, i32 %evl)
   ret <32 x i64> %v
 }
@@ -425,6 +654,24 @@ define <32 x i64> @vp_abs_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
 ; CHECK-NEXT:    vrsub.vi v24, v16, 0
 ; CHECK-NEXT:    vmax.vv v16, v16, v24
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: vp_abs_v32i64_unmasked:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    li a2, 16
+; ZVABD-NEXT:    mv a1, a0
+; ZVABD-NEXT:    bltu a0, a2, .LBB35_2
+; ZVABD-NEXT:  # %bb.1:
+; ZVABD-NEXT:    li a1, 16
+; ZVABD-NEXT:  .LBB35_2:
+; ZVABD-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    addi a1, a0, -16
+; ZVABD-NEXT:    sltu a0, a0, a1
+; ZVABD-NEXT:    addi a0, a0, -1
+; ZVABD-NEXT:    and a0, a0, a1
+; ZVABD-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; ZVABD-NEXT:    vabs.v v16, v16
+; ZVABD-NEXT:    ret
   %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, i1 false, <32 x i1> splat (i1 true), i32 %evl)
   ret <32 x i64> %v
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
index 847722ae6b8ab..05c2d101ea6bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -1,6 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs < %s \
+; RUN:     | FileCheck %s --check-prefixes=ZVABD
 
 define void @abs_v16i8(ptr %x) {
 ; CHECK-LABEL: abs_v16i8:
@@ -11,6 +15,14 @@ define void @abs_v16i8(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v16i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse8.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <16 x i8>, ptr %x
   %b = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false)
   store <16 x i8> %b, ptr %x
@@ -26,6 +38,14 @@ define void @abs_v8i16(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v8i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse16.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <8 x i16>, ptr %x
   %b = call <8 x i16> @llvm.abs.v8i16(<8 x i16> %a, i1 false)
   store <8 x i16> %b, ptr %x
@@ -41,6 +61,14 @@ define void @abs_v6i16(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v6i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse16.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <6 x i16>, ptr %x
   %b = call <6 x i16> @llvm.abs.v6i16(<6 x i16> %a, i1 false)
   store <6 x i16> %b, ptr %x
@@ -56,6 +84,14 @@ define void @abs_v4i32(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vle32.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse32.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i32>, ptr %x
   %b = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false)
   store <4 x i32> %b, ptr %x
@@ -71,6 +107,14 @@ define void @abs_v2i64(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v2i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVABD-NEXT:    vle64.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <2 x i64>, ptr %x
   %b = call <2 x i64> @llvm.abs.v2i64(<2 x i64> %a, i1 false)
   store <2 x i64> %b, ptr %x
@@ -87,6 +131,15 @@ define void @abs_v32i8(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v32i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    li a1, 32
+; ZVABD-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse8.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <32 x i8>, ptr %x
   %b = call <32 x i8> @llvm.abs.v32i8(<32 x i8> %a, i1 false)
   store <32 x i8> %b, ptr %x
@@ -102,6 +155,14 @@ define void @abs_v16i16(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v16i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse16.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <16 x i16>, ptr %x
   %b = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %a, i1 false)
   store <16 x i16> %b, ptr %x
@@ -117,6 +178,14 @@ define void @abs_v8i32(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v8i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; ZVABD-NEXT:    vle32.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse32.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <8 x i32>, ptr %x
   %b = call <8 x i32> @llvm.abs.v8i32(<8 x i32> %a, i1 false)
   store <8 x i32> %b, ptr %x
@@ -132,6 +201,14 @@ define void @abs_v4i64(ptr %x) {
 ; CHECK-NEXT:    vmax.vv v8, v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; ZVABD-NEXT:    vle64.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v8, v8
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i64>, ptr %x
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a, i1 false)
   store <4 x i64> %b, ptr %x
@@ -149,6 +226,16 @@ define void @abs_v4i64_of_sext_v4i8(ptr %x) {
 ; CHECK-NEXT:    vzext.vf8 v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i8:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; ZVABD-NEXT:    vle8.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf8 v8, v10
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i8>, ptr %x
   %a.ext = sext <4 x i8> %a to <4 x i64>
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
@@ -167,6 +254,16 @@ define void @abs_v4i64_of_sext_v4i16(ptr %x) {
 ; CHECK-NEXT:    vzext.vf4 v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i16:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; ZVABD-NEXT:    vle16.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf4 v8, v10
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i16>, ptr %x
   %a.ext = sext <4 x i16> %a to <4 x i64>
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
@@ -185,6 +282,16 @@ define void @abs_v4i64_of_sext_v4i32(ptr %x) {
 ; CHECK-NEXT:    vzext.vf2 v8, v10
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
+;
+; ZVABD-LABEL: abs_v4i64_of_sext_v4i32:
+; ZVABD:       # %bb.0:
+; ZVABD-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; ZVABD-NEXT:    vle32.v v8, (a0)
+; ZVABD-NEXT:    vabs.v v10, v8
+; ZVABD-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v8, v10
+; ZVABD-NEXT:    vse64.v v8, (a0)
+; ZVABD-NEXT:    ret
   %a = load <4 x i32>, ptr %x
   %a.ext = sext <4 x i32> %a to <4 x i64>
   %b = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)

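For a quick reproduction of the new codegen outside the full test files, here is a minimal sketch. The file name and the function name abs_example are illustrative only; the intrinsic, the triple, and the -mattr string match the RUN lines in the tests above. With +experimental-zvabd the call should lower to a single vabs.v, and without it to the vrsub.vi/vmax.vv pair shown in the CHECK lines.

  ; reproduce.ll -- hypothetical standalone reproducer; compile with:
  ;   llc -mtriple=riscv64 -mattr=+v,+experimental-zvabd -verify-machineinstrs reproduce.ll
  define <4 x i32> @abs_example(<4 x i32> %x) {
    ; llvm.abs with is_int_min_poison = false; with Zvabd this selects vabs.v
    %r = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %x, i1 false)
    ret <4 x i32> %r
  }
  declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)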
