[llvm] 7a6fd49 - [RISCV] Use short forward branch for ISD::ABS.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 21 11:00:21 PST 2023
Author: Craig Topper
Date: 2023-11-21T11:00:06-08:00
New Revision: 7a6fd49c8a4f9576d3dafe98e78f9c4bc3798105
URL: https://github.com/llvm/llvm-project/commit/7a6fd49c8a4f9576d3dafe98e78f9c4bc3798105
DIFF: https://github.com/llvm/llvm-project/commit/7a6fd49c8a4f9576d3dafe98e78f9c4bc3798105.diff
LOG: [RISCV] Use short forward branch for ISD::ABS.
We can use a short forward branch to conditionally negate if the
value is negative.
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVInstrInfo.td
llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index f722a0b57a5062a..05d0d5f964a8328 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -396,9 +396,14 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::CTLZ, MVT::i32, Expand);
}
- if (!RV64LegalI32 && Subtarget.is64Bit())
+ if (!RV64LegalI32 && Subtarget.is64Bit() &&
+ !Subtarget.hasShortForwardBranchOpt())
setOperationAction(ISD::ABS, MVT::i32, Custom);
+ // We can use PseudoCCSUB to implement ABS.
+ if (Subtarget.hasShortForwardBranchOpt())
+ setOperationAction(ISD::ABS, XLenVT, Legal);
+
if (!Subtarget.hasVendorXTHeadCondMov())
setOperationAction(ISD::SELECT, XLenVT, Custom);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index af34e026bed1488..0f1d1d4cb23cee3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1242,6 +1242,10 @@ def anyext_oneuse : unop_oneuse<anyext>;
def ext_oneuse : unop_oneuse<ext>;
def fpext_oneuse : unop_oneuse<any_fpextend>;
+def 33signbits_node : PatLeaf<(i64 GPR:$src), [{
+ return CurDAG->ComputeNumSignBits(SDValue(N, 0)) > 32;
+}]>;
+
/// Simple arithmetic operations
def : PatGprGpr<add, ADD>;
@@ -2002,6 +2006,15 @@ def : Pat<(binop_allwusers<add> GPR:$rs1, (AddiPair:$rs2)),
(AddiPairImmSmall AddiPair:$rs2))>;
}
+let Predicates = [HasShortForwardBranchOpt] in
+def : Pat<(XLenVT (abs GPR:$rs1)),
+ (PseudoCCSUB (XLenVT GPR:$rs1), (XLenVT X0), /* COND_LT */ 2,
+ (XLenVT GPR:$rs1), (XLenVT X0), (XLenVT GPR:$rs1))>;
+let Predicates = [HasShortForwardBranchOpt, IsRV64] in
+def : Pat<(sext_inreg (abs 33signbits_node:$rs1), i32),
+ (PseudoCCSUBW (i64 GPR:$rs1), (i64 X0), /* COND_LT */ 2,
+ (XLenVT GPR:$rs1), (i64 X0), (i64 GPR:$rs1))>;
+
//===----------------------------------------------------------------------===//
// Experimental RV64 i32 legalization patterns.
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
index 553bda6ff67330b..725b8fd6eeea6b3 100644
--- a/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
+++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt.ll
@@ -1458,3 +1458,81 @@ entry:
%1 = select i1 %cond, i32 %C, i32 %0
ret i32 %1
}
+
+define signext i32 @abs_i32(i32 signext %x) {
+; NOSFB-LABEL: abs_i32:
+; NOSFB: # %bb.0:
+; NOSFB-NEXT: sraiw a1, a0, 31
+; NOSFB-NEXT: xor a0, a0, a1
+; NOSFB-NEXT: subw a0, a0, a1
+; NOSFB-NEXT: ret
+;
+; RV64SFB-LABEL: abs_i32:
+; RV64SFB: # %bb.0:
+; RV64SFB-NEXT: bgez a0, .LBB34_2
+; RV64SFB-NEXT: # %bb.1:
+; RV64SFB-NEXT: negw a0, a0
+; RV64SFB-NEXT: .LBB34_2:
+; RV64SFB-NEXT: ret
+;
+; ZICOND-LABEL: abs_i32:
+; ZICOND: # %bb.0:
+; ZICOND-NEXT: bgez a0, .LBB34_2
+; ZICOND-NEXT: # %bb.1:
+; ZICOND-NEXT: negw a0, a0
+; ZICOND-NEXT: .LBB34_2:
+; ZICOND-NEXT: ret
+;
+; RV32SFB-LABEL: abs_i32:
+; RV32SFB: # %bb.0:
+; RV32SFB-NEXT: bgez a0, .LBB34_2
+; RV32SFB-NEXT: # %bb.1:
+; RV32SFB-NEXT: neg a0, a0
+; RV32SFB-NEXT: .LBB34_2:
+; RV32SFB-NEXT: ret
+ %a = call i32 @llvm.abs.i32(i32 %x, i1 false)
+ ret i32 %a
+}
+declare i32 @llvm.abs.i32(i32, i1)
+
+define i64 @abs_i64(i64 %x) {
+; NOSFB-LABEL: abs_i64:
+; NOSFB: # %bb.0:
+; NOSFB-NEXT: srai a1, a0, 63
+; NOSFB-NEXT: xor a0, a0, a1
+; NOSFB-NEXT: sub a0, a0, a1
+; NOSFB-NEXT: ret
+;
+; RV64SFB-LABEL: abs_i64:
+; RV64SFB: # %bb.0:
+; RV64SFB-NEXT: bgez a0, .LBB35_2
+; RV64SFB-NEXT: # %bb.1:
+; RV64SFB-NEXT: neg a0, a0
+; RV64SFB-NEXT: .LBB35_2:
+; RV64SFB-NEXT: ret
+;
+; ZICOND-LABEL: abs_i64:
+; ZICOND: # %bb.0:
+; ZICOND-NEXT: bgez a0, .LBB35_2
+; ZICOND-NEXT: # %bb.1:
+; ZICOND-NEXT: neg a0, a0
+; ZICOND-NEXT: .LBB35_2:
+; ZICOND-NEXT: ret
+;
+; RV32SFB-LABEL: abs_i64:
+; RV32SFB: # %bb.0:
+; RV32SFB-NEXT: snez a2, a0
+; RV32SFB-NEXT: add a2, a2, a1
+; RV32SFB-NEXT: bgez a1, .LBB35_2
+; RV32SFB-NEXT: # %bb.1:
+; RV32SFB-NEXT: neg a0, a0
+; RV32SFB-NEXT: .LBB35_2:
+; RV32SFB-NEXT: bgez a1, .LBB35_4
+; RV32SFB-NEXT: # %bb.3:
+; RV32SFB-NEXT: neg a1, a2
+; RV32SFB-NEXT: .LBB35_4:
+; RV32SFB-NEXT: ret
+ %a = call i64 @llvm.abs.i64(i64 %x, i1 false)
+ ret i64 %a
+}
+declare i64 @llvm.abs.i64(i64, i1)
More information about the llvm-commits
mailing list