[llvm] [AArch64] Use isKnownNonZero to optimize eligible compares to cmn (PR #96349)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 16 08:42:01 PDT 2024
https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/96349
From cc55896d38467ffee0493006af5a5957ba0ca9cd Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Fri, 21 Jun 2024 15:12:41 -0400
Subject: [PATCH 1/2] Pre-commit tests (NFC)
---
llvm/test/CodeGen/AArch64/cmp-select-sign.ll | 94 ++++++++++++++++++++
1 file changed, 94 insertions(+)
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index 09a6e26fe5a40..6a9b03ec51cc9 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -262,4 +262,98 @@ define <4 x i65> @sign_4xi65(<4 x i65> %a) {
ret <4 x i65> %res
}
+define i32 @or_neg(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x1
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %3 = or i32 %x, 1
+ %4 = sub i32 0, %3
+ %5 = icmp sgt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg_ult(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_ult:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x1
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: ret
+ %3 = or i32 %x, 1
+ %4 = sub i32 0, %3
+ %5 = icmp ugt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+; Negative test
+
+define i32 @or_neg_no_smin(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_no_smin:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg w8, w0
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %4 = sub i32 0, %x
+ %5 = icmp sgt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+; Negative test
+
+define i32 @or_neg_ult_no_zero(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_ult_no_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg w8, w0
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: ret
+ %4 = sub i32 0, %x
+ %5 = icmp ugt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg_no_smin_but_zero(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_no_smin_but_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bic w8, w0, w0, asr #31
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %3 = call i32 @llvm.smax.i32(i32 %x, i32 0)
+ %4 = sub i32 0, %3
+ %5 = icmp sgt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg_slt_zero_but_no_smin(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_slt_zero_but_no_smin:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #9 // =0x9
+; CHECK-NEXT: cmp w0, #9
+; CHECK-NEXT: csel w8, w0, w8, lo
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: ret
+ %3 = call i32 @llvm.umin.i32(i32 %x, i32 9)
+ %4 = sub i32 0, %3
+ %5 = icmp ugt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+declare i32 @llvm.smax.i32(i32, i32)
+declare i32 @llvm.umin.i32(i32, i32)
declare void @use_4xi1(<4 x i1>)
From 220235cc904be808ddd4708639f0ea847e0f7576 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Tue, 16 Jul 2024 11:36:52 -0400
Subject: [PATCH 2/2] [AArch64] Use isKnownNonZero to optimize eligible
compares to cmn
Turning a cmp into a cmn saves a mov and a negate instruction, so take that into account when choosing whether to flip the compare operands.
Also, do not consider right-hand operands whose absolute value can already be encoded directly as a cmn immediate.
adds with 0 and subs with 0 differ in how they set the carry flag, and the carry flag is exactly what unsigned comparisons consume.
The problematic case for unsigned comparisons therefore arises only when the second operand is zero.
Source: https://devblogs.microsoft.com/oldnewthing/20210607-00/?p=105288
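To illustrate the carry subtlety, here is a sketch for reviewers (not part
of the patch): the rewrite relies on op1 - (-op2) == op1 + op2. N and Z
always agree between the two forms, but C differs exactly when op2 == 0:

  cmp w0, #0   // subs: w0 - 0 never borrows, so C = 1 for every w0
  cmn w0, #0   // adds: w0 + 0 never carries, so C = 0 for every w0

Unsigned predicates (hi/hs/lo/ls) read C, so for them the rewrite is only
valid when op2 is known non-zero (hence the isKnownNeverZero check). Signed
predicates read V instead, which can disagree when op2 == INT_MIN because
negating INT_MIN wraps back to INT_MIN (hence the cannotBeIntMin check).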
---
.../Target/AArch64/AArch64ISelLowering.cpp | 50 ++++++++++++-------
llvm/test/CodeGen/AArch64/cmp-select-sign.ll | 15 +++---
2 files changed, 38 insertions(+), 27 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index df9b0ae1a632f..ec7b770d14c1a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3385,6 +3385,11 @@ static bool isLegalArithImmed(uint64_t C) {
return IsLegal;
}
+static bool cannotBeIntMin(SDValue CheckedVal, SelectionDAG &DAG) {
+ KnownBits KnownSrc = DAG.computeKnownBits(CheckedVal);
+ return !KnownSrc.getSignedMinValue().isMinSignedValue();
+}
+
// Can a (CMP op1, (sub 0, op2) be turned into a CMN instruction on
// the grounds that "op1 - (-op2) == op1 + op2" ? Not always, the C and V flags
// can be set differently by this operation. It comes down to whether
@@ -3392,12 +3397,15 @@ static bool isLegalArithImmed(uint64_t C) {
// everything is fine. If not then the optimization is wrong. Thus general
// comparisons are only valid if op2 != 0.
//
-// So, finally, the only LLVM-native comparisons that don't mention C and V
-// are SETEQ and SETNE. They're the only ones we can safely use CMN for in
-// the absence of information about op2.
-static bool isCMN(SDValue Op, ISD::CondCode CC) {
- return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
- (CC == ISD::SETEQ || CC == ISD::SETNE);
+// So, finally, the only LLVM-native comparisons that never mention C or V
+// are the equality ones. For the rest, CMN remains safe only when op2 is
+// known non-zero (unsigned compares) or cannot be INT_MIN (signed compares).
+static bool isCMN(SDValue Op, SDValue CheckedVal, ISD::CondCode CC,
+ SelectionDAG &DAG) {
+ return isNullConstant(Op.getOperand(0)) &&
+ (isIntEqualitySetCC(CC) ||
+ (isUnsignedIntSetCC(CC) ? DAG.isKnownNeverZero(CheckedVal)
+ : cannotBeIntMin(CheckedVal, DAG)));
}
static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
@@ -3442,11 +3450,12 @@ static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// register to WZR/XZR if it ends up being unused.
unsigned Opcode = AArch64ISD::SUBS;
- if (isCMN(RHS, CC)) {
+ if (RHS.getOpcode() == ISD::SUB && isCMN(RHS, RHS.getOperand(1), CC, DAG)) {
// Can we combine a (CMP op1, (sub 0, op2) into a CMN instruction ?
Opcode = AArch64ISD::ADDS;
RHS = RHS.getOperand(1);
- } else if (isCMN(LHS, CC)) {
+ } else if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
+ isIntEqualitySetCC(CC)) {
// As we are looking for EQ/NE compares, the operands can be commuted ; can
// we combine a (CMP (sub 0, op1), op2) into a CMN instruction ?
Opcode = AArch64ISD::ADDS;
@@ -3548,13 +3557,16 @@ static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
Opcode = AArch64ISD::CCMN;
RHS = DAG.getConstant(Imm.abs(), DL, Const->getValueType(0));
}
- } else if (RHS.getOpcode() == ISD::SUB) {
- SDValue SubOp0 = RHS.getOperand(0);
- if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
- // See emitComparison() on why we can only do this for SETEQ and SETNE.
- Opcode = AArch64ISD::CCMN;
- RHS = RHS.getOperand(1);
- }
+ } else if (RHS.getOpcode() == ISD::SUB &&
+ isCMN(RHS, RHS.getOperand(1), CC, DAG)) {
+ Opcode = AArch64ISD::CCMN;
+ RHS = RHS.getOperand(1);
+ } else if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
+ isIntEqualitySetCC(CC)) {
+ // As we are looking for EQ/NE compares, the operands can be commuted; can
+ // we combine a (CMP (sub 0, op1), op2) into a CMN instruction?
+ Opcode = AArch64ISD::CCMN;
+ LHS = LHS.getOperand(1);
}
if (Opcode == 0)
Opcode = AArch64ISD::CCMP;
@@ -3872,8 +3884,10 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// cmp w12, w11, lsl #1
if (!isa<ConstantSDNode>(RHS) ||
!isLegalArithImmed(RHS->getAsAPIntVal().abs().getZExtValue())) {
- bool LHSIsCMN = isCMN(LHS, CC);
- bool RHSIsCMN = isCMN(RHS, CC);
+ bool LHSIsCMN =
+ LHS.getOpcode() == ISD::SUB && isCMN(LHS, LHS.getOperand(1), CC, DAG);
+ bool RHSIsCMN =
+ RHS.getOpcode() == ISD::SUB && isCMN(RHS, RHS.getOperand(1), CC, DAG);
SDValue TheLHS = LHSIsCMN ? LHS.getOperand(1) : LHS;
SDValue TheRHS = RHSIsCMN ? RHS.getOperand(1) : RHS;
@@ -3886,7 +3900,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue Cmp;
AArch64CC::CondCode AArch64CC;
- if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) {
+ if (isIntEqualitySetCC(CC) && isa<ConstantSDNode>(RHS)) {
const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS);
// The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index 6a9b03ec51cc9..a3861804bed15 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -266,9 +266,8 @@ define i32 @or_neg(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg:
; CHECK: // %bb.0:
; CHECK-NEXT: orr w8, w0, #0x1
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
%3 = or i32 %x, 1
%4 = sub i32 0, %3
@@ -281,9 +280,8 @@ define i32 @or_neg_ult(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg_ult:
; CHECK: // %bb.0:
; CHECK-NEXT: orr w8, w0, #0x1
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%3 = or i32 %x, 1
%4 = sub i32 0, %3
@@ -326,9 +324,8 @@ define i32 @or_neg_no_smin_but_zero(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg_no_smin_but_zero:
; CHECK: // %bb.0:
; CHECK-NEXT: bic w8, w0, w0, asr #31
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
%3 = call i32 @llvm.smax.i32(i32 %x, i32 0)
%4 = sub i32 0, %3