[llvm] c19e900 - [AArch64] Signed comparison using CMN is safe when the subtraction is nsw (#141993)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 12 13:02:54 PDT 2025
Author: AZero13
Date: 2025-06-12T21:02:51+01:00
New Revision: c19e900ce8b422f6b8c028fbbd9ef7e9d3720236
URL: https://github.com/llvm/llvm-project/commit/c19e900ce8b422f6b8c028fbbd9ef7e9d3720236
DIFF: https://github.com/llvm/llvm-project/commit/c19e900ce8b422f6b8c028fbbd9ef7e9d3720236.diff
LOG: [AArch64] Signed comparison using CMN is safe when the subtraction is nsw (#141993)
nsw means no signed wrap, and 0 - INT_MIN is a signed wrap.
One point needs to be addressed up front: is it okay to always
transform a > -b into cmn for a signed comparison, even if b could be
INT_MIN? Yes — -INT_MIN is undefined, at least in C, so unless fwrapv
is specified, opt puts nsw on signed integer operations, which allows
for more folds anyway.
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ac545534d728b..5b9e699eaa408 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3392,8 +3392,19 @@ bool isLegalCmpImmed(APInt C) {
return isLegalArithImmed(C.abs().getZExtValue());
}
-static bool cannotBeIntMin(SDValue CheckedVal, SelectionDAG &DAG) {
- KnownBits KnownSrc = DAG.computeKnownBits(CheckedVal);
+static bool isSafeSignedCMN(SDValue Op, SelectionDAG &DAG) {
+ // 0 - INT_MIN sign wraps, so no signed wrap means cmn is safe.
+ if (Op->getFlags().hasNoSignedWrap())
+ return true;
+
+ // We can still figure out if the second operand is safe to use
+ // in a CMN instruction by checking if it is known to be not the minimum
+ // signed value. If it is not, then we can safely use CMN.
+ // Note: We can eventually remove this check and simply rely on
+ // Op->getFlags().hasNoSignedWrap() once SelectionDAG/ISelLowering
+ // consistently sets them appropriately when making said nodes.
+
+ KnownBits KnownSrc = DAG.computeKnownBits(Op.getOperand(1));
return !KnownSrc.getSignedMinValue().isMinSignedValue();
}
@@ -3402,7 +3413,7 @@ static bool cannotBeIntMin(SDValue CheckedVal, SelectionDAG &DAG) {
// can be set
differently by this operation. It comes down to whether
// "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are then
// everything is fine. If not then the optimization is wrong. Thus general
-// comparisons are only valid if op2 != 0.
+// comparisons are only valid if op2 != 0 and op2 != INT_MIN.
//
// So, finally, the only LLVM-native comparisons that don't mention C or V
// are the ones that aren't unsigned comparisons. They're the only ones we can
@@ -3411,7 +3422,7 @@ static bool isCMN(SDValue Op, ISD::CondCode CC, SelectionDAG &DAG) {
return Op.getOpcode() == ISD::SUB && isNullConstant(Op.getOperand(0)) &&
(isIntEqualitySetCC(CC) ||
(isUnsignedIntSetCC(CC) && DAG.isKnownNeverZero(Op.getOperand(1))) ||
- (isSignedIntSetCC(CC) && cannotBeIntMin(Op.getOperand(1), DAG)));
+ (isSignedIntSetCC(CC) && isSafeSignedCMN(Op, DAG)));
}
static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
diff --git a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
index c5fd9b63cce97..5765e0acae269 100644
--- a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
@@ -602,3 +602,49 @@ define i1 @almost_immediate_neg_ugt_64(i64 %x) {
%cmp = icmp ugt i64 %x, -16773121
ret i1 %cmp
}
+
+define i1 @cmn_nsw(i32 %a, i32 %b) {
+; CHECK-LABEL: cmn_nsw:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w0, w1
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %sub = sub nsw i32 0, %b
+ %cmp = icmp sgt i32 %a, %sub
+ ret i1 %cmp
+}
+
+define i1 @cmn_nsw_64(i64 %a, i64 %b) {
+; CHECK-LABEL: cmn_nsw_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x0, x1
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %sub = sub nsw i64 0, %b
+ %cmp = icmp sgt i64 %a, %sub
+ ret i1 %cmp
+}
+
+define i1 @cmn_nsw_neg(i32 %a, i32 %b) {
+; CHECK-LABEL: cmn_nsw_neg:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg w8, w1
+; CHECK-NEXT: cmp w0, w8
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %sub = sub i32 0, %b
+ %cmp = icmp sgt i32 %a, %sub
+ ret i1 %cmp
+}
+
+define i1 @cmn_nsw_neg_64(i64 %a, i64 %b) {
+; CHECK-LABEL: cmn_nsw_neg_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: neg x8, x1
+; CHECK-NEXT: cmp x0, x8
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %sub = sub i64 0, %b
+ %cmp = icmp sgt i64 %a, %sub
+ ret i1 %cmp
+}
More information about the llvm-commits
mailing list