[llvm] [RISCV] Narrow vector absolute value (PR #82041)

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 16 14:59:43 PST 2024


https://github.com/preames updated https://github.com/llvm/llvm-project/pull/82041

From 3161696bcab0933d4b1d9a789b2d016932b31ea0 Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 16 Feb 2024 13:05:47 -0800
Subject: [PATCH 1/2] [RISCV] Narrow vector absolute value

If we have an abs(sext a) we can legally perform this as a sext (abs a).
(See the same combine in instcombine - note that the IntMinIsPoison flag
 doesn't exist in SDAG yet.)

On RVV, this is likely profitable because it may allow us to perform
the arithmetic operations involved in the abs at a narrower LMUL
before widening for the user.  We could arguably avoid narrowing
below DLEN, but at worst the transform moves the sext around and
creates one extra vsetvli toggle, in the case where the source could
previously be handled via a load with an explicit EEW.
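
As a rough illustration, the equivalent rewrite at the IR level looks
like the following (the combine itself operates on SDAG nodes; the
value names are purely illustrative):

  %a.ext = sext <4 x i8> %a to <4 x i64>
  %res = call <4 x i64> @llvm.abs.v4i64(<4 x i64> %a.ext, i1 false)
    =>
  %a.abs = call <4 x i8> @llvm.abs.v4i8(<4 x i8> %a, i1 false)
  %res = zext <4 x i8> %a.abs to <4 x i64>

The result is widened with zext rather than sext since abs produces a
non-negative value; the second commit below switches the code to that
form.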
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   | 15 ++++++++++-
 .../CodeGen/RISCV/rvv/fixed-vectors-abs.ll    | 27 ++++++++++---------
 2 files changed, 29 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d4cee26d5f727f..2fc1418d5e768a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1417,7 +1417,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR,
                          ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
                          ISD::EXPERIMENTAL_VP_REVERSE, ISD::MUL,
-                         ISD::INSERT_VECTOR_ELT});
+                         ISD::INSERT_VECTOR_ELT, ISD::ABS});
   if (Subtarget.hasVendorXTHeadMemPair())
     setTargetDAGCombine({ISD::LOAD, ISD::STORE});
   if (Subtarget.useRVVForFixedLengthVectors())
@@ -15611,6 +15611,19 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
                        DAG.getConstant(~SignBit, DL, VT));
   }
+  case ISD::ABS: {
+    EVT VT = N->getValueType(0);
+    SDValue N0 = N->getOperand(0);
+    // abs (sext) -> sext (abs)
+    // abs (zext) -> zext (handled elsewhere)
+    if (VT.isVector() && N0.hasOneUse() && N0.getOpcode() == ISD::SIGN_EXTEND) {
+      SDValue Src = N0.getOperand(0);
+      SDLoc DL(N);
+      return DAG.getNode(ISD::SIGN_EXTEND, DL, VT,
+                         DAG.getNode(ISD::ABS, DL, Src.getValueType(), Src));
+    }
+    break;
+  }
   case ISD::ADD: {
     if (SDValue V = combineBinOp_VLToVWBinOp_VL(N, DCI, Subtarget))
       return V;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
index d2e0113e69b900..118c0743ab9698 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -152,12 +152,13 @@ declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
 define void @abs_v4i64_of_sext_v4i8(ptr %x) {
 ; CHECK-LABEL: abs_v4i64_of_sext_v4i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vrsub.vi v9, v8, 0
+; CHECK-NEXT:    vmax.vv v8, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsext.vf8 v10, v8
-; CHECK-NEXT:    vrsub.vi v8, v10, 0
-; CHECK-NEXT:    vmax.vv v8, v10, v8
-; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v10, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i8>, ptr %x
   %a.ext = sext <4 x i8> %a to <4 x i64>
@@ -169,12 +170,13 @@ define void @abs_v4i64_of_sext_v4i8(ptr %x) {
 define void @abs_v4i64_of_sext_v4i16(ptr %x) {
 ; CHECK-LABEL: abs_v4i64_of_sext_v4i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vrsub.vi v9, v8, 0
+; CHECK-NEXT:    vmax.vv v8, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsext.vf4 v10, v8
-; CHECK-NEXT:    vrsub.vi v8, v10, 0
-; CHECK-NEXT:    vmax.vv v8, v10, v8
-; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v10, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, ptr %x
   %a.ext = sext <4 x i16> %a to <4 x i64>
@@ -186,12 +188,13 @@ define void @abs_v4i64_of_sext_v4i16(ptr %x) {
 define void @abs_v4i64_of_sext_v4i32(ptr %x) {
 ; CHECK-LABEL: abs_v4i64_of_sext_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vle32.v v8, (a0)
+; CHECK-NEXT:    vrsub.vi v9, v8, 0
+; CHECK-NEXT:    vmax.vv v8, v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vrsub.vi v8, v10, 0
-; CHECK-NEXT:    vmax.vv v8, v10, v8
-; CHECK-NEXT:    vse64.v v8, (a0)
+; CHECK-NEXT:    vse64.v v10, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x
   %a.ext = sext <4 x i32> %a to <4 x i64>

From 10015d6b0f079df52badb9e165ec4ec55cb842ff Mon Sep 17 00:00:00 2001
From: Philip Reames <preames at rivosinc.com>
Date: Fri, 16 Feb 2024 14:59:30 -0800
Subject: [PATCH 2/2] Use the right cast
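
Zero extending the narrow abs result is correct even in the INT_MIN
corner case: with IntMinIsPoison = false, abs of i8 -128 wraps back to
-128 (0x80), and zero extending that to i64 gives 128, which matches
abs of the sign-extended value.  Sign extending it would instead give
-128.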

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp      | 4 ++--
 llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 2fc1418d5e768a..d985fb38df97e9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -15614,12 +15614,12 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::ABS: {
     EVT VT = N->getValueType(0);
     SDValue N0 = N->getOperand(0);
-    // abs (sext) -> sext (abs)
+    // abs (sext) -> zext (abs)
     // abs (zext) -> zext (handled elsewhere)
     if (VT.isVector() && N0.hasOneUse() && N0.getOpcode() == ISD::SIGN_EXTEND) {
       SDValue Src = N0.getOperand(0);
       SDLoc DL(N);
-      return DAG.getNode(ISD::SIGN_EXTEND, DL, VT,
+      return DAG.getNode(ISD::ZERO_EXTEND, DL, VT,
                          DAG.getNode(ISD::ABS, DL, Src.getValueType(), Src));
     }
     break;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
index 118c0743ab9698..f607add17b4b9d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -157,7 +157,7 @@ define void @abs_v4i64_of_sext_v4i8(ptr %x) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vsext.vf8 v10, v8
+; CHECK-NEXT:    vzext.vf8 v10, v8
 ; CHECK-NEXT:    vse64.v v10, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i8>, ptr %x
@@ -175,7 +175,7 @@ define void @abs_v4i64_of_sext_v4i16(ptr %x) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vsext.vf4 v10, v8
+; CHECK-NEXT:    vzext.vf4 v10, v8
 ; CHECK-NEXT:    vse64.v v10, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, ptr %x
@@ -193,7 +193,7 @@ define void @abs_v4i64_of_sext_v4i32(ptr %x) {
 ; CHECK-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-NEXT:    vmax.vv v8, v8, v9
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vse64.v v10, (a0)
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %x


