[llvm] [AArch64] Support negation in Global-ISEL unless the left operand is an AND (PR #151054)

via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 28 16:02:27 PDT 2025


https://github.com/AZero13 created https://github.com/llvm/llvm-project/pull/151054

This matches what SelectionDAG already does, without losing the AND optimization opportunities.
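
For illustration, here is a minimal standalone sketch of the immediate rule involved (the encoding check mirrors AArch64's ADD/SUB immediate, a 12-bit value optionally shifted left by 12; this is a sketch for this email, not the in-tree helper):

#include <cstdint>
#include <cstdio>

// AArch64 ADD/SUB (immediate) accepts a 12-bit unsigned value,
// optionally shifted left by 12 bits.
static bool isLegalArithImmed(uint64_t C) {
  return (C >> 12) == 0 || ((C & 0xFFFULL) == 0 && (C >> 24) == 0);
}

// The point of the patch: a compare against a negative constant can be
// emitted as CMN (an ADDS with the negated constant), so only |C| needs
// to be a legal arithmetic immediate.
static bool isLegalCmpImmed(int64_t C) {
  uint64_t Abs = C < 0 ? 0 - static_cast<uint64_t>(C)
                       : static_cast<uint64_t>(C);
  return isLegalArithImmed(Abs);
}

int main() {
  printf("%d\n", isLegalCmpImmed(4095));  // 1: cmp x, #4095
  printf("%d\n", isLegalCmpImmed(-1));    // 1: cmn x, #1
  printf("%d\n", isLegalCmpImmed(-4097)); // 0: 4097 is not encodable
}

The test churn below shows the effect: where the original constant was -1, "cmp wN, #0" with ge/lt becomes "cmn wN, #1" with gt/le.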

From 32e2f1addb969a3f350fa7dd8a066ce48823f795 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Mon, 28 Jul 2025 19:02:03 -0400
Subject: [PATCH] [AArch64] Support negation in Global-ISEL unless the left
 operand is an AND

This matches what SelectionDAG already does, without losing the AND optimization opportunities.
---
 .../GISel/AArch64PostLegalizerLowering.cpp    | 77 ++++++++++++-------
 ...postlegalizer-lowering-adjust-icmp-imm.mir |  8 +-
 .../test/CodeGen/AArch64/fptosi-sat-scalar.ll | 16 ++--
 .../CodeGen/AArch64/select-constant-xor.ll    | 52 ++++++-------
 .../CodeGen/AArch64/selectcc-to-shiftand.ll   | 12 +--
 llvm/test/CodeGen/AArch64/tbz-tbnz.ll         | 76 ++++++++++++------
 6 files changed, 147 insertions(+), 94 deletions(-)

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index 3ba08c8c1d988..b78e50817a589 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -561,6 +561,30 @@ void applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
   MI.eraseFromParent();
 }
 
+bool isLegalCmpImmed(APInt C) {
+  // Works for negative immediates too, since the compare can be lowered
+  // to a CMN (ADDS) instruction with the negated immediate.
+  return isLegalArithImmed(C.abs().getZExtValue());
+}
+
+bool shouldBeAdjustedToZero(MachineInstr *LHS, APInt C, CmpInst::Predicate &P) {
+  // Only works when the LHS is a G_AND (selected to AND/ANDS later).
+  if (LHS->getOpcode() != TargetOpcode::G_AND)
+    return false;
+
+  if (C.isOne() && (P == CmpInst::ICMP_SLT || P == CmpInst::ICMP_SGE)) {
+    P = (P == CmpInst::ICMP_SLT) ? CmpInst::ICMP_SLE : CmpInst::ICMP_SGT;
+    return true;
+  }
+
+  if (C.isAllOnes() && (P == CmpInst::ICMP_SLE || P == CmpInst::ICMP_SGT)) {
+    P = (P == CmpInst::ICMP_SLE) ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE;
+    return true;
+  }
+
+  return false;
+}
+
 /// Determine if it is possible to modify the \p RHS and predicate \p P of a
 /// G_ICMP instruction such that the right-hand side is an arithmetic immediate.
 ///
@@ -569,7 +593,7 @@ void applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
 ///
 /// \note This assumes that the comparison has been legalized.
 std::optional<std::pair<uint64_t, CmpInst::Predicate>>
-tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
+tryAdjustICmpImmAndPred(Register LHS, Register RHS, CmpInst::Predicate P,
                         const MachineRegisterInfo &MRI) {
   const auto &Ty = MRI.getType(RHS);
   if (Ty.isVector())
@@ -582,9 +606,17 @@ tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
   auto ValAndVReg = getIConstantVRegValWithLookThrough(RHS, MRI);
   if (!ValAndVReg)
     return std::nullopt;
-  uint64_t OriginalC = ValAndVReg->Value.getZExtValue();
-  uint64_t C = OriginalC;
-  if (isLegalArithImmed(C))
+
+  APInt C = ValAndVReg->Value;
+  uint64_t OriginalC = C.getZExtValue();
+  MachineInstr *LHSDef = getDefIgnoringCopies(LHS, MRI);
+
+  if (shouldBeAdjustedToZero(LHSDef, C, P)) {
+    // Predicate is already adjusted.
+    return {{0, P}};
+  }
+
+  if (isLegalCmpImmed(C))
     return std::nullopt;
 
   // We have a non-arithmetic immediate. Check if adjusting the immediate and
@@ -600,11 +632,10 @@ tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
     // x sge c => x sgt c - 1
     //
     // When c is not the smallest possible negative number.
-    if ((Size == 64 && static_cast<int64_t>(C) == INT64_MIN) ||
-        (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))
+    if (C.isMinSignedValue())
       return std::nullopt;
     P = (P == CmpInst::ICMP_SLT) ? CmpInst::ICMP_SLE : CmpInst::ICMP_SGT;
-    C -= 1;
+    C = C - 1;
     break;
   case CmpInst::ICMP_ULT:
   case CmpInst::ICMP_UGE:
@@ -613,11 +644,9 @@ tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
     // x ult c => x ule c - 1
     // x uge c => x ugt c - 1
     //
-    // When c is not zero.
-    if (C == 0)
-      return std::nullopt;
+    // C is already not 0 because 0 is a legal immediate.
     P = (P == CmpInst::ICMP_ULT) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
-    C -= 1;
+    C = C - 1;
     break;
   case CmpInst::ICMP_SLE:
   case CmpInst::ICMP_SGT:
@@ -627,11 +656,10 @@ tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
     // x sgt c => s sge c + 1
     //
     // When c is not the largest possible signed integer.
-    if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||
-        (Size == 64 && static_cast<int64_t>(C) == INT64_MAX))
+    if (C.isMaxSignedValue())
       return std::nullopt;
     P = (P == CmpInst::ICMP_SLE) ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE;
-    C += 1;
+    C = C + 1;
     break;
   case CmpInst::ICMP_ULE:
   case CmpInst::ICMP_UGT:
@@ -640,21 +668,17 @@ tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
     // x ule c => x ult c + 1
     // x ugt c => s uge c + 1
     //
-    // When c is not the largest possible unsigned integer.
-    if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
-        (Size == 64 && C == UINT64_MAX))
-      return std::nullopt;
+    // C cannot be all-ones here, since -1 is a legal compare immediate.
     P = (P == CmpInst::ICMP_ULE) ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
-    C += 1;
+    C = C + 1;
     break;
   }
 
   // Check if the new constant is valid, and return the updated constant and
   // predicate if it is.
-  if (Size == 32)
-    C = static_cast<uint32_t>(C);
-  if (isLegalArithImmed(C))
-    return {{C, P}};
+  uint64_t NewC = C.getZExtValue();
+  if (isLegalCmpImmed(C))
+    return {{NewC, P}};
 
   auto IsMaterializableInSingleInstruction = [=](uint64_t Imm) {
     SmallVector<AArch64_IMM::ImmInsnModel> Insn;
@@ -663,8 +687,8 @@ tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
   };
 
   if (!IsMaterializableInSingleInstruction(OriginalC) &&
-      IsMaterializableInSingleInstruction(C))
-    return {{C, P}};
+      IsMaterializableInSingleInstruction(NewC))
+    return {{NewC, P}};
 
   return std::nullopt;
 }
@@ -681,9 +705,10 @@ bool matchAdjustICmpImmAndPred(
     MachineInstr &MI, const MachineRegisterInfo &MRI,
     std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
   assert(MI.getOpcode() == TargetOpcode::G_ICMP);
+  Register LHS = MI.getOperand(2).getReg();
   Register RHS = MI.getOperand(3).getReg();
   auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
-  if (auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(RHS, Pred, MRI)) {
+  if (auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(LHS, RHS, Pred, MRI)) {
     MatchInfo = *MaybeNewImmAndPred;
     return true;
   }
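
The predicate rewrites in shouldBeAdjustedToZero above rely on standard signed-compare identities; as a quick standalone check (an illustration, not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  // For any signed v:  v < 1 <=> v <= 0,   v >= 1 <=> v > 0,
  //                    v <= -1 <=> v < 0,  v > -1 <=> v >= 0.
  // Rewriting the compare against zero lets the G_AND feeding it be
  // selected as a flag-setting ANDS/TST instead of AND plus a compare.
  const int64_t vals[] = {INT64_MIN, -2, -1, 0, 1, 2, INT64_MAX};
  for (int64_t v : vals) {
    assert((v < 1) == (v <= 0));
    assert((v >= 1) == (v > 0));
    assert((v <= -1) == (v < 0));
    assert((v > -1) == (v >= 0));
  }
  return 0;
}
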
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
index edc33e340c9b6..92dd797fe25e0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
@@ -649,8 +649,8 @@ body:             |
     ; LOWER-NEXT: {{  $}}
     ; LOWER-NEXT: %reg0:_(s32) = COPY $w0
     ; LOWER-NEXT: %reg1:_(s32) = COPY $w1
-    ; LOWER-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %reg0(s32), [[C]]
+    ; LOWER-NEXT: %cst:_(s32) = G_CONSTANT i32 -1
+    ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %reg0(s32), %cst
     ; LOWER-NEXT: %select:_(s32) = G_SELECT %cmp(s32), %reg0, %reg1
     ; LOWER-NEXT: $w0 = COPY %select(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
@@ -660,8 +660,8 @@ body:             |
     ; SELECT-NEXT: {{  $}}
     ; SELECT-NEXT: %reg0:gpr32common = COPY $w0
     ; SELECT-NEXT: %reg1:gpr32 = COPY $w1
-    ; SELECT-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg0, 0, 0, implicit-def $nzcv
-    ; SELECT-NEXT: %select:gpr32 = CSELWr %reg0, %reg1, 11, implicit $nzcv
+    ; SELECT-NEXT: [[ADDSWri:%[0-9]+]]:gpr32 = ADDSWri %reg0, 1, 0, implicit-def $nzcv
+    ; SELECT-NEXT: %select:gpr32 = CSELWr %reg0, %reg1, 13, implicit $nzcv
     ; SELECT-NEXT: $w0 = COPY %select
     ; SELECT-NEXT: RET_ReallyLR implicit $w0
     %reg0:_(s32) = COPY $w0
diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll
index 39e2db3a52d2c..814fc57928ed4 100644
--- a/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll
+++ b/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll
@@ -33,8 +33,8 @@ define i1 @test_signed_i1_f32(float %f) nounwind {
 ; CHECK-GI-NEXT:    fcvtzs w8, s0
 ; CHECK-GI-NEXT:    cmp w8, #0
 ; CHECK-GI-NEXT:    csel w8, w8, wzr, lt
-; CHECK-GI-NEXT:    cmp w8, #0
-; CHECK-GI-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-GI-NEXT:    cmn w8, #1
+; CHECK-GI-NEXT:    csinv w8, w8, wzr, gt
 ; CHECK-GI-NEXT:    and w0, w8, #0x1
 ; CHECK-GI-NEXT:    ret
     %x = call i1 @llvm.fptosi.sat.i1.f32(float %f)
@@ -278,8 +278,8 @@ define i1 @test_signed_i1_f64(double %f) nounwind {
 ; CHECK-GI-NEXT:    fcvtzs w8, d0
 ; CHECK-GI-NEXT:    cmp w8, #0
 ; CHECK-GI-NEXT:    csel w8, w8, wzr, lt
-; CHECK-GI-NEXT:    cmp w8, #0
-; CHECK-GI-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-GI-NEXT:    cmn w8, #1
+; CHECK-GI-NEXT:    csinv w8, w8, wzr, gt
 ; CHECK-GI-NEXT:    and w0, w8, #0x1
 ; CHECK-GI-NEXT:    ret
     %x = call i1 @llvm.fptosi.sat.i1.f64(double %f)
@@ -537,8 +537,8 @@ define i1 @test_signed_i1_f16(half %f) nounwind {
 ; CHECK-GI-CVT-NEXT:    fcvtzs w8, s0
 ; CHECK-GI-CVT-NEXT:    cmp w8, #0
 ; CHECK-GI-CVT-NEXT:    csel w8, w8, wzr, lt
-; CHECK-GI-CVT-NEXT:    cmp w8, #0
-; CHECK-GI-CVT-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-GI-CVT-NEXT:    cmn w8, #1
+; CHECK-GI-CVT-NEXT:    csinv w8, w8, wzr, gt
 ; CHECK-GI-CVT-NEXT:    and w0, w8, #0x1
 ; CHECK-GI-CVT-NEXT:    ret
 ;
@@ -547,8 +547,8 @@ define i1 @test_signed_i1_f16(half %f) nounwind {
 ; CHECK-GI-FP16-NEXT:    fcvtzs w8, h0
 ; CHECK-GI-FP16-NEXT:    cmp w8, #0
 ; CHECK-GI-FP16-NEXT:    csel w8, w8, wzr, lt
-; CHECK-GI-FP16-NEXT:    cmp w8, #0
-; CHECK-GI-FP16-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-GI-FP16-NEXT:    cmn w8, #1
+; CHECK-GI-FP16-NEXT:    csinv w8, w8, wzr, gt
 ; CHECK-GI-FP16-NEXT:    and w0, w8, #0x1
 ; CHECK-GI-FP16-NEXT:    ret
     %x = call i1 @llvm.fptosi.sat.i1.f16(half %f)
diff --git a/llvm/test/CodeGen/AArch64/select-constant-xor.ll b/llvm/test/CodeGen/AArch64/select-constant-xor.ll
index fe9a2c0fad830..16b4a3f2ed78f 100644
--- a/llvm/test/CodeGen/AArch64/select-constant-xor.ll
+++ b/llvm/test/CodeGen/AArch64/select-constant-xor.ll
@@ -25,8 +25,8 @@ define i64 @selecti64i64(i64 %a) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov x8, #-2147483648 // =0xffffffff80000000
 ; CHECK-GI-NEXT:    mov w9, #2147483647 // =0x7fffffff
-; CHECK-GI-NEXT:    cmp x0, #0
-; CHECK-GI-NEXT:    csel x0, x9, x8, ge
+; CHECK-GI-NEXT:    cmn x0, #1
+; CHECK-GI-NEXT:    csel x0, x9, x8, gt
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i64 %a, -1
   %s = select i1 %c, i64 2147483647, i64 -2147483648
@@ -42,9 +42,9 @@ define i32 @selecti64i32(i64 %a) {
 ;
 ; CHECK-GI-LABEL: selecti64i32:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    cmp x0, #0
+; CHECK-GI-NEXT:    cmn x0, #1
 ; CHECK-GI-NEXT:    mov w9, #-2147483648 // =0x80000000
-; CHECK-GI-NEXT:    cset w8, ge
+; CHECK-GI-NEXT:    cset w8, gt
 ; CHECK-GI-NEXT:    sbfx w8, w8, #0, #1
 ; CHECK-GI-NEXT:    add w0, w8, w9
 ; CHECK-GI-NEXT:    ret
@@ -65,8 +65,8 @@ define i64 @selecti32i64(i32 %a) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov x8, #-2147483648 // =0xffffffff80000000
 ; CHECK-GI-NEXT:    mov w9, #2147483647 // =0x7fffffff
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x0, x9, x8, ge
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    csel x0, x9, x8, gt
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
   %s = select i1 %c, i64 2147483647, i64 -2147483648
@@ -98,8 +98,8 @@ define i32 @selecti32i32(i32 %a) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #-85 // =0xffffffab
 ; CHECK-GI-NEXT:    mov w9, #84 // =0x54
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w9, w8, ge
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    csel w0, w9, w8, gt
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
   %s = select i1 %c, i32 84, i32 -85
@@ -117,8 +117,8 @@ define i8 @selecti32i8(i32 %a) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #84 // =0x54
 ; CHECK-GI-NEXT:    mov w9, #-85 // =0xffffffab
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w8, w9, ge
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    csel w0, w8, w9, gt
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
   %s = select i1 %c, i8 84, i8 -85
@@ -135,11 +135,11 @@ define i32 @selecti8i32(i8 %a) {
 ;
 ; CHECK-GI-LABEL: selecti8i32:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    sxtb w8, w0
+; CHECK-GI-NEXT:    mov w8, #-1 // =0xffffffff
 ; CHECK-GI-NEXT:    mov w9, #-85 // =0xffffffab
 ; CHECK-GI-NEXT:    mov w10, #84 // =0x54
-; CHECK-GI-NEXT:    cmp w8, #0
-; CHECK-GI-NEXT:    csel w0, w10, w9, ge
+; CHECK-GI-NEXT:    cmp w8, w0, sxtb
+; CHECK-GI-NEXT:    csel w0, w10, w9, lt
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i8 %a, -1
   %s = select i1 %c, i32 84, i32 -85
@@ -192,8 +192,8 @@ define i32 @selecti32i32_0(i32 %a) {
 ;
 ; CHECK-GI-LABEL: selecti32i32_0:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w8, lt
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    cset w8, le
 ; CHECK-GI-NEXT:    sbfx w0, w8, #0, #1
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
@@ -210,8 +210,8 @@ define i32 @selecti32i32_m1(i32 %a) {
 ;
 ; CHECK-GI-LABEL: selecti32i32_m1:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w8, ge
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    cset w8, gt
 ; CHECK-GI-NEXT:    sbfx w0, w8, #0, #1
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
@@ -229,8 +229,8 @@ define i32 @selecti32i32_1(i32 %a) {
 ; CHECK-GI-LABEL: selecti32i32_1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #-2 // =0xfffffffe
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csinc w0, w8, wzr, lt
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    csinc w0, w8, wzr, le
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
   %s = select i1 %c, i32 1, i32 -2
@@ -286,8 +286,8 @@ define i32 @selecti32i32_sle(i32 %a) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #12 // =0xc
 ; CHECK-GI-NEXT:    mov w9, #-13 // =0xfffffff3
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w9, w8, lt
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    csel w0, w9, w8, le
 ; CHECK-GI-NEXT:    ret
   %c = icmp sle i32 %a, -1
   %s = select i1 %c, i32 -13, i32 12
@@ -305,8 +305,8 @@ define i32 @selecti32i32_sgt(i32 %a) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #12 // =0xc
 ; CHECK-GI-NEXT:    mov w9, #-13 // =0xfffffff3
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w9, w8, lt
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    csel w0, w9, w8, le
 ; CHECK-GI-NEXT:    ret
   %c = icmp sle i32 %a, -1
   %s = select i1 %c, i32 -13, i32 12
@@ -327,9 +327,9 @@ define i32 @oneusecmp(i32 %a, i32 %b, i32 %d) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #127 // =0x7f
 ; CHECK-GI-NEXT:    mov w9, #-128 // =0xffffff80
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w8, w9, w8, lt
-; CHECK-GI-NEXT:    csel w9, w2, w1, lt
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    csel w8, w9, w8, le
+; CHECK-GI-NEXT:    csel w9, w2, w1, le
 ; CHECK-GI-NEXT:    add w0, w8, w9
 ; CHECK-GI-NEXT:    ret
   %c = icmp sle i32 %a, -1
diff --git a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
index 0d4a636446164..8c4cf5b1a9075 100644
--- a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
@@ -92,8 +92,8 @@ define i32 @pos_sel_constants(i32 %a) {
 ; CHECK-GI-LABEL: pos_sel_constants:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #5 // =0x5
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w8, wzr, ge
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    csel w0, w8, wzr, gt
 ; CHECK-GI-NEXT:    ret
   %tmp.1 = icmp sgt i32 %a, -1
   %retval = select i1 %tmp.1, i32 5, i32 0
@@ -111,8 +111,8 @@ define i32 @pos_sel_special_constant(i32 %a) {
 ;
 ; CHECK-GI-LABEL: pos_sel_special_constant:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w8, ge
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    cset w8, gt
 ; CHECK-GI-NEXT:    lsl w0, w8, #9
 ; CHECK-GI-NEXT:    ret
   %tmp.1 = icmp sgt i32 %a, -1
@@ -130,8 +130,8 @@ define i32 @pos_sel_variable_and_zero(i32 %a, i32 %b) {
 ;
 ; CHECK-GI-LABEL: pos_sel_variable_and_zero:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w1, wzr, ge
+; CHECK-GI-NEXT:    cmn w0, #1
+; CHECK-GI-NEXT:    csel w0, w1, wzr, gt
 ; CHECK-GI-NEXT:    ret
   %tmp.1 = icmp sgt i32 %a, -1
   %retval = select i1 %tmp.1, i32 %b, i32 0
diff --git a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
index 3fe7346b3db28..c76c92ac9a6c9 100644
--- a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
+++ b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
@@ -61,18 +61,32 @@ if.end:
 }
 
 define void @test3(i32 %a) {
-; CHECK-LABEL: test3:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmp w0, #12
-; CHECK-NEXT:    b.mi .LBB2_2
-; CHECK-NEXT:  // %bb.1: // %if.then
-; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    bl t
-; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:  .LBB2_2: // %if.end
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test3:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    cmp w0, #12
+; CHECK-SD-NEXT:    b.mi .LBB2_2
+; CHECK-SD-NEXT:  // %bb.1: // %if.then
+; CHECK-SD-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT:    .cfi_offset w30, -16
+; CHECK-SD-NEXT:    bl t
+; CHECK-SD-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NEXT:  .LBB2_2: // %if.end
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test3:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    sub w8, w0, #12
+; CHECK-GI-NEXT:    cmn w8, #1
+; CHECK-GI-NEXT:    b.le .LBB2_2
+; CHECK-GI-NEXT:  // %bb.1: // %if.then
+; CHECK-GI-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT:    .cfi_offset w30, -16
+; CHECK-GI-NEXT:    bl t
+; CHECK-GI-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT:  .LBB2_2: // %if.end
+; CHECK-GI-NEXT:    ret
 entry:
   %sub = add nsw i32 %a, -12
   %cmp = icmp sgt i32 %sub, -1
@@ -88,18 +102,32 @@ if.end:
 }
 
 define void @test4(i64 %a) {
-; CHECK-LABEL: test4:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    cmp x0, #12
-; CHECK-NEXT:    b.mi .LBB3_2
-; CHECK-NEXT:  // %bb.1: // %if.then
-; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    bl t
-; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:  .LBB3_2: // %if.end
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test4:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    cmp x0, #12
+; CHECK-SD-NEXT:    b.mi .LBB3_2
+; CHECK-SD-NEXT:  // %bb.1: // %if.then
+; CHECK-SD-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT:    .cfi_offset w30, -16
+; CHECK-SD-NEXT:    bl t
+; CHECK-SD-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NEXT:  .LBB3_2: // %if.end
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test4:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    sub x8, x0, #12
+; CHECK-GI-NEXT:    cmn x8, #1
+; CHECK-GI-NEXT:    b.le .LBB3_2
+; CHECK-GI-NEXT:  // %bb.1: // %if.then
+; CHECK-GI-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT:    .cfi_offset w30, -16
+; CHECK-GI-NEXT:    bl t
+; CHECK-GI-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT:  .LBB3_2: // %if.end
+; CHECK-GI-NEXT:    ret
 entry:
   %sub = add nsw i64 %a, -12
   %cmp = icmp sgt i64 %sub, -1


