[llvm] [AArch64] Use isKnownNonZero to optimize eligible compares to cmn (PR #96349)

via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 8 14:07:44 PDT 2024


https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/96349

From 3dd193865a3a416f80ba0cfdd5662bfb91b178c9 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Fri, 21 Jun 2024 15:12:41 -0400
Subject: [PATCH 1/2] Pre-commit tests (NFC)

---
 llvm/test/CodeGen/AArch64/cmp-chains.ll      | 32 ++++++++++++++++++++
 llvm/test/CodeGen/AArch64/cmp-select-sign.ll | 15 +++++++++
 2 files changed, 47 insertions(+)

diff --git a/llvm/test/CodeGen/AArch64/cmp-chains.ll b/llvm/test/CodeGen/AArch64/cmp-chains.ll
index 14cb0c82b1c039..d51c9c946f4677 100644
--- a/llvm/test/CodeGen/AArch64/cmp-chains.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-chains.ll
@@ -258,3 +258,35 @@ define i32 @neg_range_int(i32 %a, i32 %b, i32 %c) {
   ret i32 %retval.0
 }
 
+; (b > -3 || a < -(c | 1))
+define i32 @neg_range_int_cmn(i32 %a, i32 %b, i32 %c) {
+; SDISEL-LABEL: neg_range_int_cmn:
+; SDISEL:       // %bb.0:
+; SDISEL-NEXT:    orr w8, w2, #0x1
+; SDISEL-NEXT:    neg w8, w8
+; SDISEL-NEXT:    cmp w8, w0
+; SDISEL-NEXT:    ccmn w1, #3, #0, le
+; SDISEL-NEXT:    csel w0, w1, w0, gt
+; SDISEL-NEXT:    ret
+;
+; GISEL-LABEL: neg_range_int_cmn:
+; GISEL:       // %bb.0:
+; GISEL-NEXT:    orr w8, w2, #0x1
+; GISEL-NEXT:    cmn w1, #3
+; GISEL-NEXT:    neg w8, w8
+; GISEL-NEXT:    cset w9, gt
+; GISEL-NEXT:    cmp w8, w0
+; GISEL-NEXT:    cset w8, gt
+; GISEL-NEXT:    orr w8, w9, w8
+; GISEL-NEXT:    and w8, w8, #0x1
+; GISEL-NEXT:    tst w8, #0x1
+; GISEL-NEXT:    csel w0, w1, w0, ne
+; GISEL-NEXT:    ret
+  %or = or i32 %c, 1
+  %sub = sub nsw i32 0, %or
+  %cmp = icmp sgt i32 %b, -3
+  %cmp1 = icmp sgt i32 %sub, %a
+  %1 = select i1 %cmp, i1 true, i1 %cmp1
+  %ret = select i1 %1, i32 %b, i32 %a
+  ret i32 %ret
+}
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index 09a6e26fe5a403..ca20a7a435a648 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -262,4 +262,19 @@ define <4 x i65> @sign_4xi65(<4 x i65> %a) {
   ret <4 x i65> %res
 }
 
+define i32 @or_neg(i32 %x, i32 %y) {
+; CHECK-LABEL: or_neg:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    orr w8, w0, #0x1
+; CHECK-NEXT:    neg w8, w8
+; CHECK-NEXT:    cmp w8, w1
+; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    ret
+  %3 = or i32 %x, 1
+  %4 = sub nsw i32 0, %3
+  %5 = icmp sgt i32 %4, %y
+  %6 = zext i1 %5 to i32
+  ret i32 %6
+}
+
 declare void @use_4xi1(<4 x i1>)

From 0d60a3f12506c0ba6c8ff301521b806123f8f022 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Fri, 21 Jun 2024 15:26:02 -0400
Subject: [PATCH 2/2] [AArch64] Use isKnownNonZero to optimize to cmn instead
 of cmp

Turning a cmp into a cmn can save a mov or a negate instruction, so take that into account when deciding whether to swap the compare operands.

Also, do not consider swapping the operands when the absolute value of the right-hand constant can already be encoded as a cmn immediate.
---
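
As a standalone illustration (not part of the patch; the helper names below are made up for the example), here is a small C++ sketch of why the cmp -> cmn rewrite is only safe under the conditions isCMN now checks: SUBS computes op1 - (-op2) while ADDS computes op1 + op2, and the C and V flags diverge exactly when op2 == 0 (breaking unsigned predicates, which read C) or op2 == INT_MIN (breaking signed predicates, which read V).

#include <cstdint>
#include <cstdio>
#include <limits>

// Illustration only; models how AArch64 SUBS/ADDS set the C and V flags.
// C after SUBS: 1 when there is no borrow, i.e. A >= B (unsigned).
static bool carryFromSubs(uint32_t A, uint32_t B) { return A >= B; }

// C after ADDS: 1 on unsigned overflow of A + B.
static bool carryFromAdds(uint32_t A, uint32_t B) {
  return static_cast<uint64_t>(A) + B > std::numeric_limits<uint32_t>::max();
}

// V after SUBS/ADDS: 1 when the signed result does not fit in 32 bits.
static bool overflowFromSubs(int32_t A, int32_t B) {
  int64_t R = static_cast<int64_t>(A) - B;
  return R < INT32_MIN || R > INT32_MAX;
}
static bool overflowFromAdds(int32_t A, int32_t B) {
  int64_t R = static_cast<int64_t>(A) + B;
  return R < INT32_MIN || R > INT32_MAX;
}

int main() {
  // op2 == 0: "cmp op1, (0 - op2)" yields C = 1 while "cmn op1, op2" yields
  // C = 0, so an unsigned predicate (which reads C) would flip its answer.
  uint32_t Op1 = 5, Op2 = 0;
  std::printf("C: subs=%d adds=%d\n", carryFromSubs(Op1, 0u - Op2),
              carryFromAdds(Op1, Op2));

  // op2 == INT_MIN: 0 - op2 wraps back to INT_MIN, and V diverges, so a
  // signed predicate (which reads V) would also be wrong.
  int32_t SOp1 = 0, SOp2 = INT32_MIN;
  std::printf("V: subs=%d adds=%d\n", overflowFromSubs(SOp1, SOp2),
              overflowFromAdds(SOp1, SOp2));
  return 0;
}

This mirrors the checks below: isCMN accepts unsigned predicates only when DAG.isKnownNeverZero(CheckedVal) holds and the remaining signed predicates only when cannotBeIntMin(CheckedVal, DAG) holds; equality predicates read only Z and are always safe.
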
 .../Target/AArch64/AArch64ISelLowering.cpp    | 59 +++++++++++++++----
 llvm/test/CodeGen/AArch64/cmp-chains.ll       |  3 +-
 llvm/test/CodeGen/AArch64/cmp-select-sign.ll  |  5 +-
 .../CodeGen/AArch64/typepromotion-overflow.ll | 16 ++---
 4 files changed, 57 insertions(+), 26 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 7115e387506001..4ecc3e589d113a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3391,6 +3391,11 @@ static bool isLegalArithImmed(uint64_t C) {
   return IsLegal;
 }
 
+static bool cannotBeIntMin(SDValue CheckedVal, SelectionDAG &DAG) {
+  KnownBits KnownSrc = DAG.computeKnownBits(CheckedVal);
+  return !KnownSrc.getSignedMinValue().isMinSignedValue();
+}
+
 // Can a (CMP op1, (sub 0, op2) be turned into a CMN instruction on
 // the grounds that "op1 - (-op2) == op1 + op2" ? Not always, the C and V flags
 // can be set differently by this operation. It comes down to whether
@@ -3398,12 +3403,15 @@ static bool isLegalArithImmed(uint64_t C) {
 // everything is fine. If not then the optimization is wrong. Thus general
 // comparisons are only valid if op2 != 0.
 //
-// So, finally, the only LLVM-native comparisons that don't mention C and V
-// are SETEQ and SETNE. They're the only ones we can safely use CMN for in
-// the absence of information about op2.
-static bool isCMN(SDValue Op, ISD::CondCode CC) {
+// So, finally, the only LLVM-native comparisons that don't mention C or V
+// are SETEQ and SETNE. For the other predicates we can still use CMN when op2
+// is known to be non-zero (unsigned) or known not to be INT_MIN (signed).
+static bool isCMN(SDValue Op, SDValue CheckedVal, ISD::CondCode CC,
+                  SelectionDAG &DAG) {
   return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
-         (CC == ISD::SETEQ || CC == ISD::SETNE);
+         (isIntEqualitySetCC(CC) ||
+          (isUnsignedIntSetCC(CC) ? DAG.isKnownNeverZero(CheckedVal)
+                                  : cannotBeIntMin(CheckedVal, DAG)));
 }
 
 static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
@@ -3448,15 +3456,26 @@ static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
   // register to WZR/XZR if it ends up being unused.
   unsigned Opcode = AArch64ISD::SUBS;
 
-  if (isCMN(RHS, CC)) {
+  if (RHS.getOpcode() == ISD::SUB && isCMN(RHS, RHS.getOperand(1), CC, DAG)) {
     // Can we combine a (CMP op1, (sub 0, op2) into a CMN instruction ?
     Opcode = AArch64ISD::ADDS;
     RHS = RHS.getOperand(1);
-  } else if (isCMN(LHS, CC)) {
+  } else if (isCMN(LHS, RHS, CC, DAG) &&
+             isCMN(LHS, LHS.getOperand(1), CC, DAG)) {
     // As we are looking for EQ/NE compares, the operands can be commuted ; can
     // we combine a (CMP (sub 0, op1), op2) into a CMN instruction ?
+    // Operands are not swapped, but removing the negation swaps the CC.
+    CC = ISD::getSetCCSwappedOperands(CC);
     Opcode = AArch64ISD::ADDS;
     LHS = LHS.getOperand(1);
+  } else if (LHS.getOpcode() == ISD::SUB &&
+             isCMN(LHS, LHS.getOperand(1), CC, DAG)) {
+    // The operands can be commuted here as well: can we combine a
+    // (CMP (sub 0, op1), op2) into a CMN instruction? See isCMN above.
+    LHS = LHS.getOperand(1);
+    std::swap(LHS, RHS);
+    CC = ISD::getSetCCSwappedOperands(CC);
+    Opcode = AArch64ISD::ADDS;
   } else if (isNullConstant(RHS) && !isUnsignedIntSetCC(CC)) {
     if (LHS.getOpcode() == ISD::AND) {
       // Similarly, (CMP (and X, Y), 0) can be implemented with a TST
@@ -3556,11 +3575,18 @@ static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
     }
   } else if (RHS.getOpcode() == ISD::SUB) {
     SDValue SubOp0 = RHS.getOperand(0);
-    if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
-      // See emitComparison() on why we can only do this for SETEQ and SETNE.
+    if (isCMN(SubOp0, RHS.getOperand(1), CC, DAG)) {
       Opcode = AArch64ISD::CCMN;
       RHS = RHS.getOperand(1);
     }
+  } else if (LHS.getOpcode() == ISD::SUB) {
+    SDValue SubOp0 = LHS.getOperand(0);
+    if (isCMN(SubOp0, LHS.getOperand(1), CC, DAG)) {
+      LHS = LHS.getOperand(1);
+      std::swap(LHS, RHS);
+      CC = ISD::getSetCCSwappedOperands(CC);
+      Opcode = AArch64ISD::CCMN;
+    }
   }
   if (Opcode == 0)
     Opcode = AArch64ISD::CCMP;
@@ -3876,10 +3902,17 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
   //    cmp     w13, w12
   // can be turned into:
   //    cmp     w12, w11, lsl #1
-  if (!isa<ConstantSDNode>(RHS) || !isLegalArithImmed(RHS->getAsZExtVal())) {
-    SDValue TheLHS = isCMN(LHS, CC) ? LHS.getOperand(1) : LHS;
-
-    if (getCmpOperandFoldingProfit(TheLHS) > getCmpOperandFoldingProfit(RHS)) {
+  if (!isa<ConstantSDNode>(RHS) ||
+      !isLegalArithImmed(RHS->getAsAPIntVal().abs().getZExtValue())) {
+    bool LHSIsCMN =
+        LHS.getOpcode() == ISD::SUB && isCMN(LHS, LHS.getOperand(1), CC, DAG);
+    bool RHSIsCMN =
+        RHS.getOpcode() == ISD::SUB && isCMN(RHS, RHS.getOperand(1), CC, DAG);
+    SDValue TheLHS = LHSIsCMN ? LHS.getOperand(1) : LHS;
+    SDValue TheRHS = RHSIsCMN ? RHS.getOperand(1) : RHS;
+
+    if (getCmpOperandFoldingProfit(TheLHS) + (LHSIsCMN ? 1 : 0) >
+        getCmpOperandFoldingProfit(TheRHS) + (RHSIsCMN ? 1 : 0)) {
       std::swap(LHS, RHS);
       CC = ISD::getSetCCSwappedOperands(CC);
     }
diff --git a/llvm/test/CodeGen/AArch64/cmp-chains.ll b/llvm/test/CodeGen/AArch64/cmp-chains.ll
index d51c9c946f4677..4ea515911b0c51 100644
--- a/llvm/test/CodeGen/AArch64/cmp-chains.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-chains.ll
@@ -263,8 +263,7 @@ define i32 @neg_range_int_cmn(i32 %a, i32 %b, i32 %c) {
 ; SDISEL-LABEL: neg_range_int_cmn:
 ; SDISEL:       // %bb.0:
 ; SDISEL-NEXT:    orr w8, w2, #0x1
-; SDISEL-NEXT:    neg w8, w8
-; SDISEL-NEXT:    cmp w8, w0
+; SDISEL-NEXT:    cmn w0, w8
 ; SDISEL-NEXT:    ccmn w1, #3, #0, le
 ; SDISEL-NEXT:    csel w0, w1, w0, gt
 ; SDISEL-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
index ca20a7a435a648..37821d2993eb98 100644
--- a/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-select-sign.ll
@@ -266,9 +266,8 @@ define i32 @or_neg(i32 %x, i32 %y) {
 ; CHECK-LABEL: or_neg:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    orr w8, w0, #0x1
-; CHECK-NEXT:    neg w8, w8
-; CHECK-NEXT:    cmp w8, w1
-; CHECK-NEXT:    cset w0, gt
+; CHECK-NEXT:    cmn w1, w8
+; CHECK-NEXT:    cset w0, lt
 ; CHECK-NEXT:    ret
   %3 = or i32 %x, 1
   %4 = sub nsw i32 0, %3
diff --git a/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll b/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
index 39edc03ced442e..2451ea478ed71e 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
@@ -107,11 +107,11 @@ define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
 define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
 ; CHECK-LABEL: overflow_add_positive_const_limit:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
-; CHECK-NEXT:    mov w9, #8 // =0x8
-; CHECK-NEXT:    cmp w8, w0, sxtb
+; CHECK-NEXT:    sxtb w9, w0
 ; CHECK-NEXT:    mov w8, #16 // =0x10
-; CHECK-NEXT:    csel w0, w9, w8, gt
+; CHECK-NEXT:    cmn w9, #1
+; CHECK-NEXT:    mov w9, #8 // =0x8
+; CHECK-NEXT:    csel w0, w9, w8, lt
 ; CHECK-NEXT:    ret
   %cmp = icmp slt i8 %a, -1
   %res = select i1 %cmp, i32 8, i32 16
@@ -162,11 +162,11 @@ define i32 @safe_add_underflow_neg(i8 zeroext %a) {
 define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
 ; CHECK-LABEL: overflow_sub_negative_const_limit:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #-1 // =0xffffffff
-; CHECK-NEXT:    mov w9, #8 // =0x8
-; CHECK-NEXT:    cmp w8, w0, sxtb
+; CHECK-NEXT:    sxtb w9, w0
 ; CHECK-NEXT:    mov w8, #16 // =0x10
-; CHECK-NEXT:    csel w0, w9, w8, gt
+; CHECK-NEXT:    cmn w9, #1
+; CHECK-NEXT:    mov w9, #8 // =0x8
+; CHECK-NEXT:    csel w0, w9, w8, lt
 ; CHECK-NEXT:    ret
   %cmp = icmp slt i8 %a, -1
   %res = select i1 %cmp, i32 8, i32 16


