[llvm] [AArch64] Use isKnownNonZero to optimize eligible compares to cmn and ccmn (PR #96349)
via llvm-commits
llvm-commits at lists.llvm.org
Sat Jul 20 14:45:55 PDT 2024
https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/96349
From 3d5811095f3b7878e7f4449e7b03cfdab6895a76 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Fri, 21 Jun 2024 15:12:41 -0400
Subject: [PATCH 1/2] Pre-commit tests (NFC)
---
llvm/test/CodeGen/AArch64/cmp-chains.ll | 188 ++++++++++++++++
llvm/test/CodeGen/AArch64/cmp-select-sign.ll | 214 +++++++++++++++++++
2 files changed, 402 insertions(+)
diff --git a/llvm/test/CodeGen/AArch64/cmp-chains.ll b/llvm/test/CodeGen/AArch64/cmp-chains.ll
index 14cb0c82b1c03..3ee6e8f55044e 100644
--- a/llvm/test/CodeGen/AArch64/cmp-chains.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-chains.ll
@@ -258,3 +258,191 @@ define i32 @neg_range_int(i32 %a, i32 %b, i32 %c) {
ret i32 %retval.0
}
+; (b > -(d | 1) && a < c)
+define i32 @neg_range_int_comp(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: neg_range_int_comp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w3, #0x1
+; CHECK-NEXT: cmp w0, w2
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: ccmp w1, w8, #4, lt
+; CHECK-NEXT: csel w0, w1, w0, gt
+; CHECK-NEXT: ret
+ %dor = or i32 %d, 1
+ %negd = sub i32 0, %dor
+ %cmp = icmp sgt i32 %b, %negd
+ %cmp1 = icmp slt i32 %a, %c
+ %or.cond = and i1 %cmp, %cmp1
+ %retval.0 = select i1 %or.cond, i32 %b, i32 %a
+ ret i32 %retval.0
+}
+
+; (b >u -(d | 1) && a < c)
+define i32 @neg_range_int_comp_u(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: neg_range_int_comp_u:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w3, #0x1
+; CHECK-NEXT: cmp w0, w2
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: ccmp w1, w8, #0, lt
+; CHECK-NEXT: csel w0, w1, w0, hi
+; CHECK-NEXT: ret
+ %dor = or i32 %d, 1
+ %negd = sub i32 0, %dor
+ %cmp = icmp ugt i32 %b, %negd
+ %cmp1 = icmp slt i32 %a, %c
+ %or.cond = and i1 %cmp, %cmp1
+ %retval.0 = select i1 %or.cond, i32 %b, i32 %a
+ ret i32 %retval.0
+}
+
+; (b > -(d | 1) && a u < c)
+define i32 @neg_range_int_comp_ua(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: neg_range_int_comp_ua:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w3, #0x1
+; CHECK-NEXT: cmp w0, w2
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: ccmp w1, w8, #4, lo
+; CHECK-NEXT: csel w0, w1, w0, gt
+; CHECK-NEXT: ret
+ %dor = or i32 %d, 1
+ %negd = sub i32 0, %dor
+ %cmp = icmp sgt i32 %b, %negd
+ %cmp1 = icmp ult i32 %a, %c
+ %or.cond = and i1 %cmp, %cmp1
+ %retval.0 = select i1 %or.cond, i32 %b, i32 %a
+ ret i32 %retval.0
+}
+
+; (b >= -3 && a > c)
+define i32 @neg_range_int_2(i32 %a, i32 %b, i32 %c) {
+; SDISEL-LABEL: neg_range_int_2:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: cmp w0, w2
+; SDISEL-NEXT: ccmn w1, #4, #4, gt
+; SDISEL-NEXT: csel w0, w1, w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: neg_range_int_2:
+; GISEL: // %bb.0:
+; GISEL-NEXT: cmp w0, w2
+; GISEL-NEXT: ccmn w1, #3, #8, gt
+; GISEL-NEXT: csel w0, w1, w0, ge
+; GISEL-NEXT: ret
+ %cmp = icmp sge i32 %b, -3
+ %cmp1 = icmp sgt i32 %a, %c
+ %or.cond = and i1 %cmp, %cmp1
+ %retval.0 = select i1 %or.cond, i32 %b, i32 %a
+ ret i32 %retval.0
+}
+
+; (b < -(d | 1) && a >= c)
+define i32 @neg_range_int_comp2(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: neg_range_int_comp2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w3, #0x1
+; CHECK-NEXT: cmp w0, w2
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: ccmp w1, w8, #0, ge
+; CHECK-NEXT: csel w0, w1, w0, lt
+; CHECK-NEXT: ret
+ %dor = or i32 %d, 1
+ %negd = sub i32 0, %dor
+ %cmp = icmp slt i32 %b, %negd
+ %cmp1 = icmp sge i32 %a, %c
+ %or.cond = and i1 %cmp, %cmp1
+ %retval.0 = select i1 %or.cond, i32 %b, i32 %a
+ ret i32 %retval.0
+}
+
+; (b <u -(d | 1) && a > c)
+define i32 @neg_range_int_comp_u2(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: neg_range_int_comp_u2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w3, #0x1
+; CHECK-NEXT: cmp w0, w2
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: ccmp w1, w8, #2, gt
+; CHECK-NEXT: csel w0, w1, w0, lo
+; CHECK-NEXT: ret
+ %dor = or i32 %d, 1
+ %negd = sub i32 0, %dor
+ %cmp = icmp ult i32 %b, %negd
+ %cmp1 = icmp sgt i32 %a, %c
+ %or.cond = and i1 %cmp, %cmp1
+ %retval.0 = select i1 %or.cond, i32 %b, i32 %a
+ ret i32 %retval.0
+}
+
+; (b > -(d | 1) && a u > c)
+define i32 @neg_range_int_comp_ua2(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: neg_range_int_comp_ua2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w3, #0x1
+; CHECK-NEXT: cmp w0, w2
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: ccmp w1, w8, #4, hi
+; CHECK-NEXT: csel w0, w1, w0, gt
+; CHECK-NEXT: ret
+ %dor = or i32 %d, 1
+ %negd = sub i32 0, %dor
+ %cmp = icmp sgt i32 %b, %negd
+ %cmp1 = icmp ugt i32 %a, %c
+ %or.cond = and i1 %cmp, %cmp1
+ %retval.0 = select i1 %or.cond, i32 %b, i32 %a
+ ret i32 %retval.0
+}
+
+; (b > -(d | 1) && a == c)
+define i32 @neg_range_int_comp_ua3(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: neg_range_int_comp_ua3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w3, #0x1
+; CHECK-NEXT: cmp w0, w2
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: ccmp w1, w8, #4, eq
+; CHECK-NEXT: csel w0, w1, w0, gt
+; CHECK-NEXT: ret
+ %dor = or i32 %d, 1
+ %negd = sub i32 0, %dor
+ %cmp = icmp sgt i32 %b, %negd
+ %cmp1 = icmp eq i32 %a, %c
+ %or.cond = and i1 %cmp, %cmp1
+ %retval.0 = select i1 %or.cond, i32 %b, i32 %a
+ ret i32 %retval.0
+}
+
+; (-(a | 1) u> (b | 3) && a u> c)
+define i32 @neg_range_int_c(i32 %a, i32 %b, i32 %c) {
+; SDISEL-LABEL: neg_range_int_c:
+; SDISEL: // %bb.0: // %entry
+; SDISEL-NEXT: orr w8, w0, #0x1
+; SDISEL-NEXT: orr w9, w1, #0x3
+; SDISEL-NEXT: neg w8, w8
+; SDISEL-NEXT: cmp w9, w8
+; SDISEL-NEXT: ccmp w2, w0, #2, lo
+; SDISEL-NEXT: cset w0, lo
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: neg_range_int_c:
+; GISEL: // %bb.0: // %entry
+; GISEL-NEXT: orr w8, w0, #0x1
+; GISEL-NEXT: orr w9, w1, #0x3
+; GISEL-NEXT: neg w8, w8
+; GISEL-NEXT: cmp w9, w8
+; GISEL-NEXT: cset w8, lo
+; GISEL-NEXT: cmp w2, w0
+; GISEL-NEXT: cset w9, lo
+; GISEL-NEXT: and w0, w8, w9
+; GISEL-NEXT: ret
+entry:
+ %or = or i32 %a, 1
+ %sub = sub i32 0, %or
+ %or1 = or i32 %b, 3
+ %cmp = icmp ult i32 %or1, %sub
+ %cmp2 = icmp ult i32 %c, %a
+ %0 = and i1 %cmp, %cmp2
+ %land.ext = zext i1 %0 to i32
+ ret i32 %land.ext
+}
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index 09a6e26fe5a40..a16528ef871a4 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -262,4 +262,218 @@ define <4 x i65> @sign_4xi65(<4 x i65> %a) {
ret <4 x i65> %res
}
+define i32 @or_neg(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x1
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %3 = or i32 %x, 1
+ %4 = sub i32 0, %3
+ %5 = icmp sgt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg_ugt(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_ugt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x1
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: ret
+ %3 = or i32 %x, 1
+ %4 = sub i32 0, %3
+ %5 = icmp ugt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+; Negative test
+
+define i32 @or_neg_no_smin(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_no_smin:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg w8, w0
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %4 = sub i32 0, %x
+ %5 = icmp sgt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+; Negative test
+
+define i32 @or_neg_ult_no_zero(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_ult_no_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg w8, w0
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, lo
+; CHECK-NEXT: ret
+ %4 = sub i32 0, %x
+ %5 = icmp ult i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg_no_smin_but_zero(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_no_smin_but_zero:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bic w8, w0, w0, asr #31
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %3 = call i32 @llvm.smax.i32(i32 %x, i32 0)
+ %4 = sub i32 0, %3
+ %5 = icmp sgt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg_slt_zero_but_no_smin(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_slt_zero_but_no_smin:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #9 // =0x9
+; CHECK-NEXT: cmp w0, #9
+; CHECK-NEXT: csel w8, w0, w8, lo
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: ret
+ %3 = call i32 @llvm.umin.i32(i32 %x, i32 9)
+ %4 = sub i32 0, %3
+ %5 = icmp ugt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg2(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x1
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, ge
+; CHECK-NEXT: ret
+ %3 = or i32 %x, 1
+ %4 = sub i32 0, %3
+ %5 = icmp sge i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg3(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg3:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x1
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, lt
+; CHECK-NEXT: ret
+ %3 = or i32 %x, 1
+ %4 = sub i32 0, %3
+ %5 = icmp slt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg4(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x1
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, le
+; CHECK-NEXT: ret
+ %3 = or i32 %x, 1
+ %4 = sub i32 0, %3
+ %5 = icmp sle i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg_ult(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_ult:
+; CHECK: // %bb.0:
+; CHECK-NEXT: orr w8, w0, #0x1
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: ret
+ %3 = or i32 %x, 1
+ %4 = sub i32 0, %3
+ %5 = icmp ugt i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg_no_smin2(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_no_smin2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg w8, w0
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, ge
+; CHECK-NEXT: ret
+ %4 = sub i32 0, %x
+ %5 = icmp sge i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+; Negative test
+
+define i32 @or_neg_ult_no_zero2(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_ult_no_zero2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg w8, w0
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, lo
+; CHECK-NEXT: ret
+ %4 = sub i32 0, %x
+ %5 = icmp ult i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg_no_smin_but_zero2(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_no_smin_but_zero2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: bic w8, w0, w0, asr #31
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, le
+; CHECK-NEXT: ret
+ %3 = call i32 @llvm.smax.i32(i32 %x, i32 0)
+ %4 = sub i32 0, %3
+ %5 = icmp sle i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+define i32 @or_neg_slt_zero_but_no_smin2(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg_slt_zero_but_no_smin2:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #9 // =0x9
+; CHECK-NEXT: cmp w0, #9
+; CHECK-NEXT: csel w8, w0, w8, lo
+; CHECK-NEXT: neg w8, w8
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, hs
+; CHECK-NEXT: ret
+ %3 = call i32 @llvm.umin.i32(i32 %x, i32 9)
+ %4 = sub i32 0, %3
+ %5 = icmp uge i32 %4, %y
+ %6 = zext i1 %5 to i32
+ ret i32 %6
+}
+
+declare i32 @llvm.smax.i32(i32, i32)
+declare i32 @llvm.umin.i32(i32, i32)
declare void @use_4xi1(<4 x i1>)
From 8865a3fef44bc14c57aee01619d35294aa1d05e5 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sat, 20 Jul 2024 17:41:14 -0400
Subject: [PATCH 2/2] [AArch64] Use isKnownNonZero to optimize eligible
compares to cmn and ccmn
The problematic case for unsigned comparisons occurs only when the second argument is zero, and for signed comparisons only when the second argument is the minimum possible signed integer. If we can prove the register value is neither of those, it is safe to fold the compare into CMN and CCMN.
Source: https://devblogs.microsoft.com/oldnewthing/20210607-00/?p=105288
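To make the flag argument concrete, here is a minimal standalone sketch (illustration only, not part of the patch; the flag formulas follow the architectural SUBS/ADDS definitions) that models NZCV for CMP a, -y versus CMN a, y and shows the two only disagree at the problematic values:

#include <cstdint>
#include <cstdio>

struct Flags { bool N, Z, C, V; };

// SUBS a, b: computes a - b; C means "no borrow" (a >= b unsigned).
static Flags subs(uint32_t a, uint32_t b) {
  uint32_t r = a - b;
  return {(int32_t)r < 0, r == 0, a >= b,
          (int32_t)((a ^ b) & (a ^ r)) < 0};
}

// ADDS a, b: computes a + b; C means unsigned carry-out.
static Flags adds(uint32_t a, uint32_t b) {
  uint32_t r = a + b;
  return {(int32_t)r < 0, r == 0, r < a,
          (int32_t)(~(a ^ b) & (a ^ r)) < 0};
}

int main() {
  uint32_t a = 5;
  // y == 0: CMP a, -0 sets C=1 (no borrow), but CMN a, 0 sets C=0.
  printf("y=0:       C differs: %d\n", subs(a, 0).C != adds(a, 0).C);
  // y == INT_MIN: -y wraps back to INT_MIN, and V can differ.
  uint32_t m = 0x80000000u;
  printf("y=INT_MIN: V differs: %d\n", subs(a, -m).V != adds(a, m).V);
  // For every other y, SUBS(a, -y) and ADDS(a, y) agree on all of NZCV,
  // which is why ruling out 0 (unsigned) and INT_MIN (signed) suffices.
  return 0;
}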
---
.../Target/AArch64/AArch64ISelLowering.cpp | 50 +++--
llvm/test/CodeGen/AArch64/cmp-chains.ll | 171 ++++++++++++------
llvm/test/CodeGen/AArch64/cmp-select-sign.ll | 40 ++--
3 files changed, 162 insertions(+), 99 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 84de1ee8f8923..e37ef32a87a53 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3403,6 +3403,11 @@ static bool isLegalArithImmed(uint64_t C) {
return IsLegal;
}
+static bool cannotBeIntMin(SDValue CheckedVal, SelectionDAG &DAG) {
+ KnownBits KnownSrc = DAG.computeKnownBits(CheckedVal);
+ return !KnownSrc.getSignedMinValue().isMinSignedValue();
+}
+
// Can a (CMP op1, (sub 0, op2)) be turned into a CMN instruction on
// the grounds that "op1 - (-op2) == op1 + op2"? Not always: the C and V flags
// can be set differently by this operation. It comes down to whether
@@ -3410,12 +3415,14 @@ static bool isLegalArithImmed(uint64_t C) {
// everything is fine. If not then the optimization is wrong. Thus general
// comparisons are only valid if op2 != 0.
//
-// So, finally, the only LLVM-native comparisons that don't mention C and V
-// are SETEQ and SETNE. They're the only ones we can safely use CMN for in
-// the absence of information about op2.
-static bool isCMN(SDValue Op, ISD::CondCode CC) {
+// So, finally, the only LLVM-native comparisons that never mention C or V are
+// SETEQ and SETNE. For the others, we can safely use CMN only when op2 is
+// known to be non-zero (unsigned compares) or not INT_MIN (signed compares).
+static bool isCMN(SDValue Op, ISD::CondCode CC, SelectionDAG &DAG) {
return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
- (CC == ISD::SETEQ || CC == ISD::SETNE);
+ (isIntEqualitySetCC(CC) ||
+ (isUnsignedIntSetCC(CC) && DAG.isKnownNeverZero(Op.getOperand(1))) ||
+ (isSignedIntSetCC(CC) && cannotBeIntMin(Op.getOperand(1), DAG)));
}
static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
@@ -3460,11 +3467,12 @@ static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// register to WZR/XZR if it ends up being unused.
unsigned Opcode = AArch64ISD::SUBS;
- if (isCMN(RHS, CC)) {
+ if (isCMN(RHS, CC, DAG)) {
// Can we combine a (CMP op1, (sub 0, op2)) into a CMN instruction?
Opcode = AArch64ISD::ADDS;
RHS = RHS.getOperand(1);
- } else if (isCMN(LHS, CC)) {
+ } else if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
+ isIntEqualitySetCC(CC)) {
// As we are looking for EQ/NE compares, the operands can be commuted; can
// we combine a (CMP (sub 0, op1), op2) into a CMN instruction?
Opcode = AArch64ISD::ADDS;
@@ -3566,13 +3574,21 @@ static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
Opcode = AArch64ISD::CCMN;
RHS = DAG.getConstant(Imm.abs(), DL, Const->getValueType(0));
}
- } else if (RHS.getOpcode() == ISD::SUB) {
- SDValue SubOp0 = RHS.getOperand(0);
- if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
- // See emitComparison() on why we can only do this for SETEQ and SETNE.
- Opcode = AArch64ISD::CCMN;
- RHS = RHS.getOperand(1);
- }
+ } else if (isCMN(RHS, CC, DAG)) {
+ Opcode = AArch64ISD::CCMN;
+ RHS = RHS.getOperand(1);
+ } else if (isCMN(LHS, CC, DAG) &&
+ (isIntEqualitySetCC(CC) ||
+ (isUnsignedIntSetCC(CC) && DAG.isKnownNeverZero(RHS)) ||
+ (isSignedIntSetCC(CC) && cannotBeIntMin(RHS, DAG)))) {
+ // We can commute (CMP (sub 0, op1), op2) if neither LHS nor RHS can be
+ // INT_MIN for a signed comparison, or 0 for an unsigned one.
+ Opcode = AArch64ISD::CCMN;
+ LHS = LHS.getOperand(1);
+ // Swap LHS and RHS if it wasn't an equality comparison, so we don't
+ // have to worry about changing the condition code:
+ // a < b -> -b < -a
+ std::swap(LHS, RHS);
}
if (Opcode == 0)
Opcode = AArch64ISD::CCMP;
@@ -3890,8 +3906,8 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// cmp w12, w11, lsl #1
if (!isa<ConstantSDNode>(RHS) ||
!isLegalArithImmed(RHS->getAsAPIntVal().abs().getZExtValue())) {
- bool LHSIsCMN = isCMN(LHS, CC);
- bool RHSIsCMN = isCMN(RHS, CC);
+ bool LHSIsCMN = isCMN(LHS, CC, DAG);
+ bool RHSIsCMN = isCMN(RHS, CC, DAG);
SDValue TheLHS = LHSIsCMN ? LHS.getOperand(1) : LHS;
SDValue TheRHS = RHSIsCMN ? RHS.getOperand(1) : RHS;
@@ -3904,7 +3920,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue Cmp;
AArch64CC::CondCode AArch64CC;
- if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) {
+ if (isIntEqualitySetCC(CC) && isa<ConstantSDNode>(RHS)) {
const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS);
// The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
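As an aside on how cannotBeIntMin gets its answer for the test patterns below: the tests use `orr w, #0x1` precisely because setting the low bit rules out both problematic values at once. A hedged sketch of that known-bits argument (assumes LLVM's KnownBits headers; the helper name is illustrative, not from the patch):

#include "llvm/Support/KnownBits.h"
using namespace llvm;

// For (x | 1), nothing is known about x, but bit 0 of the result is known
// to be one. That alone excludes both problematic values: the value cannot
// be 0, and the smallest signed value it can take is 0x80000001, i.e.
// INT_MIN + 1, so it cannot be INT_MIN either.
static bool orOneIsSafeForCMN() {
  KnownBits Known(32);   // a 32-bit value with all bits unknown...
  Known.One.setBit(0);   // ...except bit 0, which (x | 1) forces to 1
  return Known.isNonZero() &&                            // unsigned case
         !Known.getSignedMinValue().isMinSignedValue();  // signed case
}

This is why the SDISEL output in the updated tests switches from neg+ccmp to ccmn for the `or %d, 1` operands, while the plain `sub 0, %x` negative tests keep the old sequence.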
diff --git a/llvm/test/CodeGen/AArch64/cmp-chains.ll b/llvm/test/CodeGen/AArch64/cmp-chains.ll
index 3ee6e8f55044e..4b816df75a730 100644
--- a/llvm/test/CodeGen/AArch64/cmp-chains.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-chains.ll
@@ -260,14 +260,22 @@ define i32 @neg_range_int(i32 %a, i32 %b, i32 %c) {
; (b > -(d | 1) && a < c)
define i32 @neg_range_int_comp(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK-LABEL: neg_range_int_comp:
-; CHECK: // %bb.0:
-; CHECK-NEXT: orr w8, w3, #0x1
-; CHECK-NEXT: cmp w0, w2
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: ccmp w1, w8, #4, lt
-; CHECK-NEXT: csel w0, w1, w0, gt
-; CHECK-NEXT: ret
+; SDISEL-LABEL: neg_range_int_comp:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: orr w8, w3, #0x1
+; SDISEL-NEXT: cmp w0, w2
+; SDISEL-NEXT: ccmn w1, w8, #4, lt
+; SDISEL-NEXT: csel w0, w1, w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: neg_range_int_comp:
+; GISEL: // %bb.0:
+; GISEL-NEXT: orr w8, w3, #0x1
+; GISEL-NEXT: cmp w0, w2
+; GISEL-NEXT: neg w8, w8
+; GISEL-NEXT: ccmp w1, w8, #4, lt
+; GISEL-NEXT: csel w0, w1, w0, gt
+; GISEL-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp sgt i32 %b, %negd
@@ -279,14 +287,22 @@ define i32 @neg_range_int_comp(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b >u -(d | 1) && a < c)
define i32 @neg_range_int_comp_u(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK-LABEL: neg_range_int_comp_u:
-; CHECK: // %bb.0:
-; CHECK-NEXT: orr w8, w3, #0x1
-; CHECK-NEXT: cmp w0, w2
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: ccmp w1, w8, #0, lt
-; CHECK-NEXT: csel w0, w1, w0, hi
-; CHECK-NEXT: ret
+; SDISEL-LABEL: neg_range_int_comp_u:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: orr w8, w3, #0x1
+; SDISEL-NEXT: cmp w0, w2
+; SDISEL-NEXT: ccmn w1, w8, #0, lt
+; SDISEL-NEXT: csel w0, w1, w0, hi
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: neg_range_int_comp_u:
+; GISEL: // %bb.0:
+; GISEL-NEXT: orr w8, w3, #0x1
+; GISEL-NEXT: cmp w0, w2
+; GISEL-NEXT: neg w8, w8
+; GISEL-NEXT: ccmp w1, w8, #0, lt
+; GISEL-NEXT: csel w0, w1, w0, hi
+; GISEL-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp ugt i32 %b, %negd
@@ -298,14 +314,22 @@ define i32 @neg_range_int_comp_u(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b > -(d | 1) && a u < c)
define i32 @neg_range_int_comp_ua(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK-LABEL: neg_range_int_comp_ua:
-; CHECK: // %bb.0:
-; CHECK-NEXT: orr w8, w3, #0x1
-; CHECK-NEXT: cmp w0, w2
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: ccmp w1, w8, #4, lo
-; CHECK-NEXT: csel w0, w1, w0, gt
-; CHECK-NEXT: ret
+; SDISEL-LABEL: neg_range_int_comp_ua:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: orr w8, w3, #0x1
+; SDISEL-NEXT: cmp w0, w2
+; SDISEL-NEXT: ccmn w1, w8, #4, lo
+; SDISEL-NEXT: csel w0, w1, w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: neg_range_int_comp_ua:
+; GISEL: // %bb.0:
+; GISEL-NEXT: orr w8, w3, #0x1
+; GISEL-NEXT: cmp w0, w2
+; GISEL-NEXT: neg w8, w8
+; GISEL-NEXT: ccmp w1, w8, #4, lo
+; GISEL-NEXT: csel w0, w1, w0, gt
+; GISEL-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp sgt i32 %b, %negd
@@ -339,14 +363,22 @@ define i32 @neg_range_int_2(i32 %a, i32 %b, i32 %c) {
; (b < -(d | 1) && a >= c)
define i32 @neg_range_int_comp2(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK-LABEL: neg_range_int_comp2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: orr w8, w3, #0x1
-; CHECK-NEXT: cmp w0, w2
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: ccmp w1, w8, #0, ge
-; CHECK-NEXT: csel w0, w1, w0, lt
-; CHECK-NEXT: ret
+; SDISEL-LABEL: neg_range_int_comp2:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: orr w8, w3, #0x1
+; SDISEL-NEXT: cmp w0, w2
+; SDISEL-NEXT: ccmn w1, w8, #0, ge
+; SDISEL-NEXT: csel w0, w1, w0, lt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: neg_range_int_comp2:
+; GISEL: // %bb.0:
+; GISEL-NEXT: orr w8, w3, #0x1
+; GISEL-NEXT: cmp w0, w2
+; GISEL-NEXT: neg w8, w8
+; GISEL-NEXT: ccmp w1, w8, #0, ge
+; GISEL-NEXT: csel w0, w1, w0, lt
+; GISEL-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp slt i32 %b, %negd
@@ -358,14 +390,22 @@ define i32 @neg_range_int_comp2(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b <u -(d | 1) && a > c)
define i32 @neg_range_int_comp_u2(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK-LABEL: neg_range_int_comp_u2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: orr w8, w3, #0x1
-; CHECK-NEXT: cmp w0, w2
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: ccmp w1, w8, #2, gt
-; CHECK-NEXT: csel w0, w1, w0, lo
-; CHECK-NEXT: ret
+; SDISEL-LABEL: neg_range_int_comp_u2:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: orr w8, w3, #0x1
+; SDISEL-NEXT: cmp w0, w2
+; SDISEL-NEXT: ccmn w1, w8, #2, gt
+; SDISEL-NEXT: csel w0, w1, w0, lo
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: neg_range_int_comp_u2:
+; GISEL: // %bb.0:
+; GISEL-NEXT: orr w8, w3, #0x1
+; GISEL-NEXT: cmp w0, w2
+; GISEL-NEXT: neg w8, w8
+; GISEL-NEXT: ccmp w1, w8, #2, gt
+; GISEL-NEXT: csel w0, w1, w0, lo
+; GISEL-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp ult i32 %b, %negd
@@ -377,14 +417,22 @@ define i32 @neg_range_int_comp_u2(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b > -(d | 1) && a u > c)
define i32 @neg_range_int_comp_ua2(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK-LABEL: neg_range_int_comp_ua2:
-; CHECK: // %bb.0:
-; CHECK-NEXT: orr w8, w3, #0x1
-; CHECK-NEXT: cmp w0, w2
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: ccmp w1, w8, #4, hi
-; CHECK-NEXT: csel w0, w1, w0, gt
-; CHECK-NEXT: ret
+; SDISEL-LABEL: neg_range_int_comp_ua2:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: orr w8, w3, #0x1
+; SDISEL-NEXT: cmp w0, w2
+; SDISEL-NEXT: ccmn w1, w8, #4, hi
+; SDISEL-NEXT: csel w0, w1, w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: neg_range_int_comp_ua2:
+; GISEL: // %bb.0:
+; GISEL-NEXT: orr w8, w3, #0x1
+; GISEL-NEXT: cmp w0, w2
+; GISEL-NEXT: neg w8, w8
+; GISEL-NEXT: ccmp w1, w8, #4, hi
+; GISEL-NEXT: csel w0, w1, w0, gt
+; GISEL-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp sgt i32 %b, %negd
@@ -396,14 +444,22 @@ define i32 @neg_range_int_comp_ua2(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b > -(d | 1) && a u == c)
define i32 @neg_range_int_comp_ua3(i32 %a, i32 %b, i32 %c, i32 %d) {
-; CHECK-LABEL: neg_range_int_comp_ua3:
-; CHECK: // %bb.0:
-; CHECK-NEXT: orr w8, w3, #0x1
-; CHECK-NEXT: cmp w0, w2
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: ccmp w1, w8, #4, eq
-; CHECK-NEXT: csel w0, w1, w0, gt
-; CHECK-NEXT: ret
+; SDISEL-LABEL: neg_range_int_comp_ua3:
+; SDISEL: // %bb.0:
+; SDISEL-NEXT: orr w8, w3, #0x1
+; SDISEL-NEXT: cmp w0, w2
+; SDISEL-NEXT: ccmn w1, w8, #4, eq
+; SDISEL-NEXT: csel w0, w1, w0, gt
+; SDISEL-NEXT: ret
+;
+; GISEL-LABEL: neg_range_int_comp_ua3:
+; GISEL: // %bb.0:
+; GISEL-NEXT: orr w8, w3, #0x1
+; GISEL-NEXT: cmp w0, w2
+; GISEL-NEXT: neg w8, w8
+; GISEL-NEXT: ccmp w1, w8, #4, eq
+; GISEL-NEXT: csel w0, w1, w0, gt
+; GISEL-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp sgt i32 %b, %negd
@@ -419,8 +475,7 @@ define i32 @neg_range_int_c(i32 %a, i32 %b, i32 %c) {
; SDISEL: // %bb.0: // %entry
; SDISEL-NEXT: orr w8, w0, #0x1
; SDISEL-NEXT: orr w9, w1, #0x3
-; SDISEL-NEXT: neg w8, w8
-; SDISEL-NEXT: cmp w9, w8
+; SDISEL-NEXT: cmn w9, w8
; SDISEL-NEXT: ccmp w2, w0, #2, lo
; SDISEL-NEXT: cset w0, lo
; SDISEL-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index a16528ef871a4..22440b79bdcd4 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -266,9 +266,8 @@ define i32 @or_neg(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg:
; CHECK: // %bb.0:
; CHECK-NEXT: orr w8, w0, #0x1
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
%3 = or i32 %x, 1
%4 = sub i32 0, %3
@@ -281,9 +280,8 @@ define i32 @or_neg_ugt(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg_ugt:
; CHECK: // %bb.0:
; CHECK-NEXT: orr w8, w0, #0x1
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%3 = or i32 %x, 1
%4 = sub i32 0, %3
@@ -326,9 +324,8 @@ define i32 @or_neg_no_smin_but_zero(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg_no_smin_but_zero:
; CHECK: // %bb.0:
; CHECK-NEXT: bic w8, w0, w0, asr #31
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
%3 = call i32 @llvm.smax.i32(i32 %x, i32 0)
%4 = sub i32 0, %3
@@ -358,9 +355,8 @@ define i32 @or_neg2(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg2:
; CHECK: // %bb.0:
; CHECK-NEXT: orr w8, w0, #0x1
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, ge
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, le
; CHECK-NEXT: ret
%3 = or i32 %x, 1
%4 = sub i32 0, %3
@@ -373,9 +369,8 @@ define i32 @or_neg3(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg3:
; CHECK: // %bb.0:
; CHECK-NEXT: orr w8, w0, #0x1
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, lt
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
%3 = or i32 %x, 1
%4 = sub i32 0, %3
@@ -388,9 +383,8 @@ define i32 @or_neg4(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg4:
; CHECK: // %bb.0:
; CHECK-NEXT: orr w8, w0, #0x1
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, le
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, ge
; CHECK-NEXT: ret
%3 = or i32 %x, 1
%4 = sub i32 0, %3
@@ -403,9 +397,8 @@ define i32 @or_neg_ult(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg_ult:
; CHECK: // %bb.0:
; CHECK-NEXT: orr w8, w0, #0x1
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%3 = or i32 %x, 1
%4 = sub i32 0, %3
@@ -446,9 +439,8 @@ define i32 @or_neg_no_smin_but_zero2(i32 %x, i32 %y) {
; CHECK-LABEL: or_neg_no_smin_but_zero2:
; CHECK: // %bb.0:
; CHECK-NEXT: bic w8, w0, w0, asr #31
-; CHECK-NEXT: neg w8, w8
-; CHECK-NEXT: cmp w8, w1
-; CHECK-NEXT: cset w0, le
+; CHECK-NEXT: cmn w1, w8
+; CHECK-NEXT: cset w0, ge
; CHECK-NEXT: ret
%3 = call i32 @llvm.smax.i32(i32 %x, i32 0)
%4 = sub i32 0, %3