[llvm] e00366d - [AArch64] Check for negative numbers when adjusting icmps (#141151)
via llvm-commits
llvm-commits at lists.llvm.org
Fri May 30 07:22:57 PDT 2025
Author: AZero13
Date: 2025-05-30T15:22:54+01:00
New Revision: e00366dcde294b420ae9decf5ac5d85463fd274f
URL: https://github.com/llvm/llvm-project/commit/e00366dcde294b420ae9decf5ac5d85463fd274f
DIFF: https://github.com/llvm/llvm-project/commit/e00366dcde294b420ae9decf5ac5d85463fd274f.diff
LOG: [AArch64] Check for negative numbers when adjusting icmps (#141151)
This relies on the fact that we can use tst and ands for comparisons,
as done by emitComparison().
Also includes mitigations for the cases of comparing with -1 and 1,
to avoid regressions.
Fixes: https://github.com/llvm/llvm-project/issues/141137
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/CodeGen/AArch64/arm64-csel.ll
llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
llvm/test/CodeGen/AArch64/csel-subs-swapped.ll
llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
llvm/test/CodeGen/AArch64/select-constant-xor.ll
llvm/test/CodeGen/AArch64/signbit-shift.ll
llvm/test/CodeGen/AArch64/signbit-test.ll
llvm/test/CodeGen/AArch64/typepromotion-signed.ll
llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll
llvm/test/CodeGen/AArch64/win64_vararg.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index f18d325148742..349bcd95c09f6 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3342,6 +3342,12 @@ static bool isLegalArithImmed(uint64_t C) {
return IsLegal;
}
+bool isLegalCmpImmed(APInt C) {
+ // Works for negative immediates too, as it can be written as an ADDS
+ // instruction with a negated immediate.
+ return isLegalArithImmed(C.abs().getZExtValue());
+}
+
static bool cannotBeIntMin(SDValue CheckedVal, SelectionDAG &DAG) {
KnownBits KnownSrc = DAG.computeKnownBits(CheckedVal);
return !KnownSrc.getSignedMinValue().isMinSignedValue();
@@ -3767,58 +3773,82 @@ static unsigned getCmpOperandFoldingProfit(SDValue Op) {
return 0;
}
+// emitComparison() converts comparison with one or negative one to comparison
+// with 0. Note that this only works for signed comparisons because of how ANDS
+// works.
+static bool shouldBeAdjustedToZero(SDValue LHS, APInt C, ISD::CondCode &CC) {
+ // Only works for ANDS and AND.
+ if (LHS.getOpcode() != ISD::AND && LHS.getOpcode() != AArch64ISD::ANDS)
+ return false;
+
+ if (C.isOne() && (CC == ISD::SETLT || CC == ISD::SETGE)) {
+ CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
+ return true;
+ }
+
+ if (C.isAllOnes() && (CC == ISD::SETLE || CC == ISD::SETGT)) {
+ CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
+ return true;
+ }
+
+ return false;
+}
+
static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &AArch64cc, SelectionDAG &DAG,
const SDLoc &dl) {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
EVT VT = RHS.getValueType();
- uint64_t C = RHSC->getZExtValue();
- if (!isLegalArithImmed(C)) {
+ APInt C = RHSC->getAPIntValue();
+ // shouldBeAdjustedToZero is a special case to better fold with
+ // emitComparison().
+ if (shouldBeAdjustedToZero(LHS, C, CC)) {
+ // Adjust the constant to zero.
+ // CC has already been adjusted.
+ RHS = DAG.getConstant(0, dl, VT);
+ } else if (!isLegalCmpImmed(C)) {
// Constant does not fit, try adjusting it by one?
switch (CC) {
default:
break;
case ISD::SETLT:
case ISD::SETGE:
- if ((VT == MVT::i32 && C != 0x80000000 &&
- isLegalArithImmed((uint32_t)(C - 1))) ||
- (VT == MVT::i64 && C != 0x80000000ULL &&
- isLegalArithImmed(C - 1ULL))) {
- CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
- C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
- RHS = DAG.getConstant(C, dl, VT);
+ if (!C.isMinSignedValue()) {
+ APInt CMinusOne = C - 1;
+ if (isLegalCmpImmed(CMinusOne)) {
+ CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
+ RHS = DAG.getConstant(CMinusOne, dl, VT);
+ }
}
break;
case ISD::SETULT:
case ISD::SETUGE:
- if ((VT == MVT::i32 && C != 0 &&
- isLegalArithImmed((uint32_t)(C - 1))) ||
- (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) {
- CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
- C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1;
- RHS = DAG.getConstant(C, dl, VT);
+ if (!C.isZero()) {
+ APInt CMinusOne = C - 1;
+ if (isLegalCmpImmed(CMinusOne)) {
+ CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
+ RHS = DAG.getConstant(CMinusOne, dl, VT);
+ }
}
break;
case ISD::SETLE:
case ISD::SETGT:
- if ((VT == MVT::i32 && C != INT32_MAX &&
- isLegalArithImmed((uint32_t)(C + 1))) ||
- (VT == MVT::i64 && C != INT64_MAX &&
- isLegalArithImmed(C + 1ULL))) {
- CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
- C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
- RHS = DAG.getConstant(C, dl, VT);
+ if (!C.isMaxSignedValue()) {
+ APInt CPlusOne = C + 1;
+ if (isLegalCmpImmed(CPlusOne)) {
+ CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
+ RHS = DAG.getConstant(CPlusOne, dl, VT);
+ }
}
break;
case ISD::SETULE:
case ISD::SETUGT:
- if ((VT == MVT::i32 && C != UINT32_MAX &&
- isLegalArithImmed((uint32_t)(C + 1))) ||
- (VT == MVT::i64 && C != UINT64_MAX &&
- isLegalArithImmed(C + 1ULL))) {
- CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
- C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
- RHS = DAG.getConstant(C, dl, VT);
+ if (!C.isAllOnes()) {
+ APInt CPlusOne = C + 1;
+ if (isLegalCmpImmed(CPlusOne)) {
+ CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
+ RHS = DAG.getConstant(CPlusOne, dl, VT);
+ }
}
break;
}
@@ -3835,8 +3865,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
// cmp w13, w12
// can be turned into:
// cmp w12, w11, lsl #1
- if (!isa<ConstantSDNode>(RHS) ||
- !isLegalArithImmed(RHS->getAsAPIntVal().abs().getZExtValue())) {
+ if (!isa<ConstantSDNode>(RHS) || !isLegalCmpImmed(RHS->getAsAPIntVal())) {
bool LHSIsCMN = isCMN(LHS, CC, DAG);
bool RHSIsCMN = isCMN(RHS, CC, DAG);
SDValue TheLHS = LHSIsCMN ? LHS.getOperand(1) : LHS;
@@ -17365,17 +17394,10 @@ LLT AArch64TargetLowering::getOptimalMemOpLLT(
// 12-bit optionally shifted immediates are legal for adds.
bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
if (Immed == std::numeric_limits<int64_t>::min()) {
- LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed
- << ": avoid UB for INT64_MIN\n");
return false;
}
// Same encoding for add/sub, just flip the sign.
- Immed = std::abs(Immed);
- bool IsLegal = ((Immed >> 12) == 0 ||
- ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
- LLVM_DEBUG(dbgs() << "Is " << Immed
- << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n");
- return IsLegal;
+ return isLegalArithImmed((uint64_t)std::abs(Immed));
}
bool AArch64TargetLowering::isLegalAddScalableImmediate(int64_t Imm) const {
diff --git a/llvm/test/CodeGen/AArch64/arm64-csel.ll b/llvm/test/CodeGen/AArch64/arm64-csel.ll
index 1cf99d1b31a8b..69fad57a683ac 100644
--- a/llvm/test/CodeGen/AArch64/arm64-csel.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-csel.ll
@@ -100,9 +100,8 @@ define i32 @foo7(i32 %a, i32 %b) nounwind {
; CHECK-NEXT: subs w8, w0, w1
; CHECK-NEXT: cneg w9, w8, mi
; CHECK-NEXT: cmn w8, #1
-; CHECK-NEXT: csel w10, w9, w0, lt
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w10, w9, ge
+; CHECK-NEXT: csel w8, w9, w0, lt
+; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
entry:
%sub = sub nsw i32 %a, %b
diff --git a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
index 8fbed8bfdb3fd..1d60929f2b94c 100644
--- a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
+++ b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
@@ -14,8 +14,8 @@ define i32 @f_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: f_i8_sign_extend_inreg:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w8, w1, w2, ge
+; CHECK-NEXT: cmn w8, #1
+; CHECK-NEXT: csel w8, w1, w2, gt
; CHECK-NEXT: add w0, w8, w0, uxtb
; CHECK-NEXT: ret
entry:
@@ -36,8 +36,8 @@ define i32 @f_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: f_i16_sign_extend_inreg:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w8, w1, w2, ge
+; CHECK-NEXT: cmn w8, #1
+; CHECK-NEXT: csel w8, w1, w2, gt
; CHECK-NEXT: add w0, w8, w0, uxth
; CHECK-NEXT: ret
entry:
@@ -57,8 +57,8 @@ B:
define i64 @f_i32_sign_extend_inreg(i32 %in, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: f_i32_sign_extend_inreg:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: csel x8, x1, x2, ge
+; CHECK-NEXT: cmn w0, #1
+; CHECK-NEXT: csel x8, x1, x2, gt
; CHECK-NEXT: add x0, x8, w0, uxtw
; CHECK-NEXT: ret
entry:
@@ -145,8 +145,8 @@ define i64 @f_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: sxtw x8, w0
-; CHECK-NEXT: cmp x8, #0
-; CHECK-NEXT: csel x8, x1, x2, ge
+; CHECK-NEXT: cmn x8, #1
+; CHECK-NEXT: csel x8, x1, x2, gt
; CHECK-NEXT: add x0, x8, w0, uxtw
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
index e87d43161a895..c5fd9b63cce97 100644
--- a/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-to-cmn.ll
@@ -430,3 +430,175 @@ entry:
%cmp = icmp ne i32 %conv, %add
ret i1 %cmp
}
+
+define i1 @cmn_large_imm(i32 %a) {
+; CHECK-LABEL: cmn_large_imm:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #64765 // =0xfcfd
+; CHECK-NEXT: movk w8, #64764, lsl #16
+; CHECK-NEXT: cmp w0, w8
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i32 %a, -50529027
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_slt(i32 %x) {
+; CHECK-LABEL: almost_immediate_neg_slt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-NEXT: cset w0, le
+; CHECK-NEXT: ret
+ %cmp = icmp slt i32 %x, -16707583
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_slt_64(i64 %x) {
+; CHECK-LABEL: almost_immediate_neg_slt_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-NEXT: cset w0, le
+; CHECK-NEXT: ret
+ %cmp = icmp slt i64 %x, -16707583
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_sge(i32 %x) {
+; CHECK-LABEL: almost_immediate_neg_sge:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %cmp = icmp sge i32 %x, -16707583
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_sge_64(i64 %x) {
+; CHECK-LABEL: almost_immediate_neg_sge_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-NEXT: cset w0, gt
+; CHECK-NEXT: ret
+ %cmp = icmp sge i64 %x, -16707583
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_uge(i32 %x) {
+; CHECK-LABEL: almost_immediate_neg_uge:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: ret
+ %cmp = icmp uge i32 %x, -16707583
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_uge_64(i64 %x) {
+; CHECK-LABEL: almost_immediate_neg_uge_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-NEXT: cset w0, hi
+; CHECK-NEXT: ret
+ %cmp = icmp uge i64 %x, -16707583
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_ult(i32 %x) {
+; CHECK-LABEL: almost_immediate_neg_ult:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w0, #4079, lsl #12 // =16707584
+; CHECK-NEXT: cset w0, ls
+; CHECK-NEXT: ret
+ %cmp = icmp ult i32 %x, -16707583
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_ult_64(i64 %x) {
+; CHECK-LABEL: almost_immediate_neg_ult_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x0, #4079, lsl #12 // =16707584
+; CHECK-NEXT: cset w0, ls
+; CHECK-NEXT: ret
+ %cmp = icmp ult i64 %x, -16707583
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_sle(i32 %x) {
+; CHECK-LABEL: almost_immediate_neg_sle:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-NEXT: cset w0, lt
+; CHECK-NEXT: ret
+ %cmp = icmp sle i32 %x, -16773121
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_sle_64(i64 %x) {
+; CHECK-LABEL: almost_immediate_neg_sle_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-NEXT: cset w0, lt
+; CHECK-NEXT: ret
+ %cmp = icmp sle i64 %x, -16773121
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_sgt(i32 %x) {
+; CHECK-LABEL: almost_immediate_neg_sgt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-NEXT: cset w0, ge
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i32 %x, -16773121
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_sgt_64(i64 %x) {
+; CHECK-LABEL: almost_immediate_neg_sgt_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-NEXT: cset w0, ge
+; CHECK-NEXT: ret
+ %cmp = icmp sgt i64 %x, -16773121
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_ule(i32 %x) {
+; CHECK-LABEL: almost_immediate_neg_ule:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-NEXT: cset w0, lo
+; CHECK-NEXT: ret
+ %cmp = icmp ule i32 %x, -16773121
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_ule_64(i64 %x) {
+; CHECK-LABEL: almost_immediate_neg_ule_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-NEXT: cset w0, lo
+; CHECK-NEXT: ret
+ %cmp = icmp ule i64 %x, -16773121
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_ugt(i32 %x) {
+; CHECK-LABEL: almost_immediate_neg_ugt:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn w0, #4095, lsl #12 // =16773120
+; CHECK-NEXT: cset w0, hs
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i32 %x, -16773121
+ ret i1 %cmp
+}
+
+define i1 @almost_immediate_neg_ugt_64(i64 %x) {
+; CHECK-LABEL: almost_immediate_neg_ugt_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: cmn x0, #4095, lsl #12 // =16773120
+; CHECK-NEXT: cset w0, hs
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i64 %x, -16773121
+ ret i1 %cmp
+}
diff --git a/llvm/test/CodeGen/AArch64/csel-subs-swapped.ll b/llvm/test/CodeGen/AArch64/csel-subs-swapped.ll
index 3971da27cdddc..7d2c7854baf3d 100644
--- a/llvm/test/CodeGen/AArch64/csel-subs-swapped.ll
+++ b/llvm/test/CodeGen/AArch64/csel-subs-swapped.ll
@@ -44,10 +44,8 @@ define i32 @sge_i32(i32 %x) {
; CHECK-LABEL: sge_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #-2097152 // =0xffe00000
-; CHECK-NEXT: mov w9, #-2097153 // =0xffdfffff
-; CHECK-NEXT: sub w8, w8, w0
-; CHECK-NEXT: cmp w0, w9
-; CHECK-NEXT: csel w0, w0, w8, gt
+; CHECK-NEXT: subs w8, w8, w0
+; CHECK-NEXT: csel w0, w0, w8, le
; CHECK-NEXT: ret
%cmp = icmp sge i32 %x, -2097152
%sub = sub i32 -2097152, %x
@@ -72,10 +70,8 @@ define i32 @sle_i32(i32 %x) {
; CHECK-LABEL: sle_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #-2097152 // =0xffe00000
-; CHECK-NEXT: mov w9, #-2097151 // =0xffe00001
-; CHECK-NEXT: sub w8, w8, w0
-; CHECK-NEXT: cmp w0, w9
-; CHECK-NEXT: csel w0, w0, w8, lt
+; CHECK-NEXT: subs w8, w8, w0
+; CHECK-NEXT: csel w0, w0, w8, ge
; CHECK-NEXT: ret
%cmp = icmp sle i32 %x, -2097152
%sub = sub i32 -2097152, %x
@@ -128,10 +124,8 @@ define i32 @ule_i32(i32 %x) {
; CHECK-LABEL: ule_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #-2097152 // =0xffe00000
-; CHECK-NEXT: mov w9, #-2097151 // =0xffe00001
-; CHECK-NEXT: sub w8, w8, w0
-; CHECK-NEXT: cmp w0, w9
-; CHECK-NEXT: csel w0, w0, w8, lo
+; CHECK-NEXT: subs w8, w8, w0
+; CHECK-NEXT: csel w0, w0, w8, hs
; CHECK-NEXT: ret
%cmp = icmp ule i32 %x, -2097152
%sub = sub i32 -2097152, %x
diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
index c8c1e9007c7a0..9912c7ae19493 100644
--- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -257,9 +257,8 @@ define void @flag_setting() {
; CHECK-NEXT: tst x9, x10, lsl #63
; CHECK-NEXT: b.lt .LBB2_4
; CHECK-NEXT: // %bb.2: // %test3
-; CHECK-NEXT: and x10, x9, x10, asr #12
-; CHECK-NEXT: cmp x10, #1
-; CHECK-NEXT: b.ge .LBB2_4
+; CHECK-NEXT: tst x9, x10, asr #12
+; CHECK-NEXT: b.gt .LBB2_4
; CHECK-NEXT: // %bb.3: // %other_exit
; CHECK-NEXT: str x9, [x8]
; CHECK-NEXT: .LBB2_4: // %common.ret
diff --git a/llvm/test/CodeGen/AArch64/select-constant-xor.ll b/llvm/test/CodeGen/AArch64/select-constant-xor.ll
index 6803411f66896..fe9a2c0fad830 100644
--- a/llvm/test/CodeGen/AArch64/select-constant-xor.ll
+++ b/llvm/test/CodeGen/AArch64/select-constant-xor.ll
@@ -168,8 +168,8 @@ define i32 @icmpasreq(i32 %input, i32 %a, i32 %b) {
define i32 @icmpasrne(i32 %input, i32 %a, i32 %b) {
; CHECK-SD-LABEL: icmpasrne:
; CHECK-SD: // %bb.0:
-; CHECK-SD-NEXT: cmp w0, #0
-; CHECK-SD-NEXT: csel w0, w1, w2, ge
+; CHECK-SD-NEXT: cmn w0, #1
+; CHECK-SD-NEXT: csel w0, w1, w2, gt
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: icmpasrne:
diff --git a/llvm/test/CodeGen/AArch64/signbit-shift.ll b/llvm/test/CodeGen/AArch64/signbit-shift.ll
index 253ea1cab91fb..0e6da326a31f4 100644
--- a/llvm/test/CodeGen/AArch64/signbit-shift.ll
+++ b/llvm/test/CodeGen/AArch64/signbit-shift.ll
@@ -43,8 +43,8 @@ define i32 @sel_ifpos_tval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifpos_tval_bigger:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #41 // =0x29
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: cinc w0, w8, ge
+; CHECK-NEXT: cmn w0, #1
+; CHECK-NEXT: cinc w0, w8, gt
; CHECK-NEXT: ret
%c = icmp sgt i32 %x, -1
%r = select i1 %c, i32 42, i32 41
@@ -91,8 +91,8 @@ define i32 @sel_ifpos_fval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifpos_fval_bigger:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #41 // =0x29
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: cinc w0, w8, lt
+; CHECK-NEXT: cmn w0, #1
+; CHECK-NEXT: cinc w0, w8, le
; CHECK-NEXT: ret
%c = icmp sgt i32 %x, -1
%r = select i1 %c, i32 41, i32 42
diff --git a/llvm/test/CodeGen/AArch64/signbit-test.ll b/llvm/test/CodeGen/AArch64/signbit-test.ll
index f5eaf80cf7f8d..c74a934ee09d8 100644
--- a/llvm/test/CodeGen/AArch64/signbit-test.ll
+++ b/llvm/test/CodeGen/AArch64/signbit-test.ll
@@ -4,9 +4,9 @@
define i64 @test_clear_mask_i64_i32(i64 %x) nounwind {
; CHECK-LABEL: test_clear_mask_i64_i32:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: csel x0, x8, x0, ge
+; CHECK-NEXT: mov w8, #42 // =0x2a
+; CHECK-NEXT: cmn w0, #1
+; CHECK-NEXT: csel x0, x8, x0, gt
; CHECK-NEXT: ret
entry:
%a = and i64 %x, 2147483648
@@ -22,7 +22,7 @@ f:
define i64 @test_set_mask_i64_i32(i64 %x) nounwind {
; CHECK-LABEL: test_set_mask_i64_i32:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst x0, #0x80000000
; CHECK-NEXT: csel x0, x8, x0, ne
; CHECK-NEXT: ret
@@ -40,7 +40,7 @@ f:
define i64 @test_clear_mask_i64_i16(i64 %x) nounwind {
; CHECK-LABEL: test_clear_mask_i64_i16:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst x0, #0x8000
; CHECK-NEXT: csel x0, x8, x0, eq
; CHECK-NEXT: ret
@@ -58,7 +58,7 @@ f:
define i64 @test_set_mask_i64_i16(i64 %x) nounwind {
; CHECK-LABEL: test_set_mask_i64_i16:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst x0, #0x8000
; CHECK-NEXT: csel x0, x8, x0, ne
; CHECK-NEXT: ret
@@ -76,7 +76,7 @@ f:
define i64 @test_clear_mask_i64_i8(i64 %x) nounwind {
; CHECK-LABEL: test_clear_mask_i64_i8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst x0, #0x80
; CHECK-NEXT: csel x0, x8, x0, eq
; CHECK-NEXT: ret
@@ -94,7 +94,7 @@ f:
define i64 @test_set_mask_i64_i8(i64 %x) nounwind {
; CHECK-LABEL: test_set_mask_i64_i8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst x0, #0x80
; CHECK-NEXT: csel x0, x8, x0, ne
; CHECK-NEXT: ret
@@ -112,7 +112,7 @@ f:
define i32 @test_clear_mask_i32_i16(i32 %x) nounwind {
; CHECK-LABEL: test_clear_mask_i32_i16:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst w0, #0x8000
; CHECK-NEXT: csel w0, w8, w0, eq
; CHECK-NEXT: ret
@@ -130,7 +130,7 @@ f:
define i32 @test_set_mask_i32_i16(i32 %x) nounwind {
; CHECK-LABEL: test_set_mask_i32_i16:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst w0, #0x8000
; CHECK-NEXT: csel w0, w8, w0, ne
; CHECK-NEXT: ret
@@ -148,7 +148,7 @@ f:
define i32 @test_clear_mask_i32_i8(i32 %x) nounwind {
; CHECK-LABEL: test_clear_mask_i32_i8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst w0, #0x80
; CHECK-NEXT: csel w0, w8, w0, eq
; CHECK-NEXT: ret
@@ -166,7 +166,7 @@ f:
define i32 @test_set_mask_i32_i8(i32 %x) nounwind {
; CHECK-LABEL: test_set_mask_i32_i8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst w0, #0x80
; CHECK-NEXT: csel w0, w8, w0, ne
; CHECK-NEXT: ret
@@ -184,7 +184,7 @@ f:
define i16 @test_clear_mask_i16_i8(i16 %x) nounwind {
; CHECK-LABEL: test_clear_mask_i16_i8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst w0, #0x80
; CHECK-NEXT: csel w0, w8, w0, eq
; CHECK-NEXT: ret
@@ -202,7 +202,7 @@ f:
define i16 @test_set_mask_i16_i8(i16 %x) nounwind {
; CHECK-LABEL: test_set_mask_i16_i8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst w0, #0x80
; CHECK-NEXT: csel w0, w8, w0, ne
; CHECK-NEXT: ret
@@ -220,7 +220,7 @@ f:
define i16 @test_set_mask_i16_i7(i16 %x) nounwind {
; CHECK-LABEL: test_set_mask_i16_i7:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w8, #42 // =0x2a
; CHECK-NEXT: tst w0, #0x40
; CHECK-NEXT: csel w0, w8, w0, ne
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/typepromotion-signed.ll b/llvm/test/CodeGen/AArch64/typepromotion-signed.ll
index 212f02d86850b..0feac24062647 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-signed.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-signed.ll
@@ -60,9 +60,9 @@ define i32 @test_signext_b(ptr nocapture readonly %ptr, i8 signext %arg) {
; CHECK-NEXT: mov w8, #20894 // =0x519e
; CHECK-NEXT: add w9, w9, w1
; CHECK-NEXT: sxtb w9, w9
-; CHECK-NEXT: cmp w9, #0
+; CHECK-NEXT: cmn w9, #1
; CHECK-NEXT: mov w9, #42 // =0x2a
-; CHECK-NEXT: csel w0, w9, w8, ge
+; CHECK-NEXT: csel w0, w9, w8, gt
; CHECK-NEXT: ret
entry:
%0 = load i8, ptr %ptr, align 1
@@ -100,9 +100,9 @@ define i32 @test_signext_h(ptr nocapture readonly %ptr, i16 signext %arg) {
; CHECK-NEXT: mov w8, #20894 // =0x519e
; CHECK-NEXT: add w9, w9, w1
; CHECK-NEXT: sxth w9, w9
-; CHECK-NEXT: cmp w9, #0
+; CHECK-NEXT: cmn w9, #1
; CHECK-NEXT: mov w9, #42 // =0x2a
-; CHECK-NEXT: csel w0, w9, w8, ge
+; CHECK-NEXT: csel w0, w9, w8, gt
; CHECK-NEXT: ret
entry:
%0 = load i16, ptr %ptr, align 1
diff --git a/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll b/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll
index d23e23c752350..f475b384b67e0 100644
--- a/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll
+++ b/llvm/test/CodeGen/AArch64/use-cr-result-of-dom-icmp-st.ll
@@ -43,10 +43,9 @@ define i64 @ll_a_op_b__1(i64 %a, i64 %b) {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: lsl x8, x0, x1
; CHECK-NEXT: cmn x8, #1
-; CHECK-NEXT: csinc x9, x1, xzr, eq
-; CHECK-NEXT: cmp x8, #0
-; CHECK-NEXT: mul x9, x9, x0
-; CHECK-NEXT: csel x0, x1, x9, ge
+; CHECK-NEXT: csinc x8, x1, xzr, eq
+; CHECK-NEXT: mul x8, x8, x0
+; CHECK-NEXT: csel x0, x1, x8, gt
; CHECK-NEXT: ret
entry:
%shl = shl i64 %a, %b
@@ -162,9 +161,8 @@ define i64 @ll_a__1(i64 %a, i64 %b) {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: cmn x0, #1
; CHECK-NEXT: csinc x8, x1, xzr, eq
-; CHECK-NEXT: cmp x0, #0
; CHECK-NEXT: mul x8, x8, x0
-; CHECK-NEXT: csel x0, x1, x8, ge
+; CHECK-NEXT: csel x0, x1, x8, gt
; CHECK-NEXT: ret
entry:
%cmp = icmp sgt i64 %a, -1
@@ -278,10 +276,9 @@ define i64 @i_a_op_b__1(i32 signext %a, i32 signext %b) {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: lsl w8, w0, w1
; CHECK-NEXT: cmn w8, #1
-; CHECK-NEXT: csinc w9, w1, wzr, eq
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: mul w9, w9, w0
-; CHECK-NEXT: csel w8, w1, w9, ge
+; CHECK-NEXT: csinc w8, w1, wzr, eq
+; CHECK-NEXT: mul w8, w8, w0
+; CHECK-NEXT: csel w8, w1, w8, gt
; CHECK-NEXT: sxtw x0, w8
; CHECK-NEXT: ret
entry:
@@ -412,9 +409,8 @@ define i64 @i_a__1(i32 signext %a, i32 signext %b) {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: cmn w0, #1
; CHECK-NEXT: csinc w8, w1, wzr, eq
-; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: mul w8, w8, w0
-; CHECK-NEXT: csel w8, w1, w8, ge
+; CHECK-NEXT: csel w8, w1, w8, gt
; CHECK-NEXT: sxtw x0, w8
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/win64_vararg.ll b/llvm/test/CodeGen/AArch64/win64_vararg.ll
index aaf4cad608740..d72dee9021251 100644
--- a/llvm/test/CodeGen/AArch64/win64_vararg.ll
+++ b/llvm/test/CodeGen/AArch64/win64_vararg.ll
@@ -133,8 +133,8 @@ define i32 @fp(ptr, i64, ptr, ...) local_unnamed_addr #6 {
; CHECK-NEXT: mov x4, xzr
; CHECK-NEXT: orr x0, x8, #0x2
; CHECK-NEXT: bl __stdio_common_vsprintf
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: csinv w0, w0, wzr, ge
+; CHECK-NEXT: cmn w0, #1
+; CHECK-NEXT: csinv w0, w0, wzr, gt
; CHECK-NEXT: .seh_startepilogue
; CHECK-NEXT: ldp x29, x30, [sp, #24] // 16-byte Folded Reload
; CHECK-NEXT: .seh_save_fplr 24
@@ -268,8 +268,8 @@ define i32 @snprintf(ptr, i64, ptr, ...) local_unnamed_addr #5 {
; CHECK-NEXT: mov x4, xzr
; CHECK-NEXT: orr x0, x8, #0x2
; CHECK-NEXT: bl __stdio_common_vsprintf
-; CHECK-NEXT: cmp w0, #0
-; CHECK-NEXT: csinv w0, w0, wzr, ge
+; CHECK-NEXT: cmn w0, #1
+; CHECK-NEXT: csinv w0, w0, wzr, gt
; CHECK-NEXT: .seh_startepilogue
; CHECK-NEXT: ldp x21, x30, [sp, #32] // 16-byte Folded Reload
; CHECK-NEXT: .seh_save_lrpair x21, 32
More information about the llvm-commits
mailing list