[llvm] 19dc9c9 - [AArch64][GlobalISel] Move imm adjustment for G_ICMP to post-legalizer lowering

Jessica Paquette via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 22 15:28:03 PDT 2020


Author: Jessica Paquette
Date: 2020-10-22T15:27:36-07:00
New Revision: 19dc9c97804fdb658b06f80b8f6e9115b7542bd5

URL: https://github.com/llvm/llvm-project/commit/19dc9c97804fdb658b06f80b8f6e9115b7542bd5
DIFF: https://github.com/llvm/llvm-project/commit/19dc9c97804fdb658b06f80b8f6e9115b7542bd5.diff

LOG: [AArch64][GlobalISel] Move imm adjustment for G_ICMP to post-legalizer lowering

Move the code which adjusts the immediate/predicate on a G_ICMP to
AArch64PostLegalizerLowering.

This

- Reduces the number of places we need to test for optimized compares in the
selector. We know that the compare should have been simplified by the time it
hits the selector, so we can avoid testing this in selects, brconds, etc.

- Allows us to potentially fold more compares (previously, this optimization
was only done after calling `tryFoldCompare`; this change may let us hit some
more TST cases)

- Simplifies the selection code in `emitIntegerCompare` significantly; we can
just use an `emitSUBS` function.

- Allows us to avoid checking that the predicate has been updated after
`emitIntegerCompare`.

Also add a utility header file for things that may be useful in the selector
and various combiners. No need for an implementation file at this point, since
it's just one constexpr function for now. I've run into a couple of cases where
having a header like this would be handy, so we might as well add it here.
There are a couple of functions in the selector that can probably be factored
out into it.
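
As a concrete example of the transform being moved (taken from the new MIR
test): a 32-bit compare `x slt 4097` cannot encode 4097 as an arithmetic
immediate, but the equivalent `x sle 4096` selects to a single SUBSWri, since
4096 == 1 << 12 fits the shifted 12-bit immediate form.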

Differential Revision: https://reviews.llvm.org/D89823

Added: 
    llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir

Modified: 
    llvm/lib/Target/AArch64/AArch64Combine.td
    llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
    llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir

Removed: 
    llvm/test/CodeGen/AArch64/GlobalISel/select-arith-immed-compare.mir


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index bf2886ab94bd..1e0be249e525 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -83,12 +83,24 @@ def vashr_vlshr_imm : GICombineRule<
   (apply [{ applyVAshrLshrImm(*${root}, MRI, ${matchinfo}); }])
 >;
 
+def adjust_icmp_imm_matchdata :
+  GIDefMatchData<"std::pair<uint64_t, CmpInst::Predicate>">;
+def adjust_icmp_imm : GICombineRule <
+  (defs root:$root, adjust_icmp_imm_matchdata:$matchinfo),
+  (match (wip_match_opcode G_ICMP):$root,
+          [{ return matchAdjustICmpImmAndPred(*${root}, MRI, ${matchinfo}); }]),
+  (apply [{ applyAdjustICmpImmAndPred(*${root}, ${matchinfo}, B, Observer); }])
+>;
+
+def icmp_lowering : GICombineGroup<[adjust_icmp_imm]>;
+
 // Post-legalization combines which should happen at all optimization levels.
 // (E.g. ones that facilitate matching for the selector) For example, matching
 // pseudos.
 def AArch64PostLegalizerLoweringHelper
     : GICombinerHelper<"AArch64GenPostLegalizerLoweringHelper",
-                       [shuffle_vector_pseudos, vashr_vlshr_imm]> {
+                       [shuffle_vector_pseudos, vashr_vlshr_imm,
+                        icmp_lowering]> {
   let DisableRuleOption = "aarch64postlegalizerlowering-disable-rule";
 }
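
(The match step stores the adjusted immediate and predicate in the rule's
GIDefMatchData; the apply step then rewrites the G_ICMP through the change
observer. Grouping the rule under icmp_lowering leaves room for further
G_ICMP lowerings later.)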
 

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h
new file mode 100644
index 000000000000..bed1136c7a67
--- /dev/null
+++ b/llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h
@@ -0,0 +1,29 @@
+//===- AArch64GlobalISelUtils.h ----------------------------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file APIs for AArch64-specific helper functions used in the GlobalISel
+/// pipeline.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_AARCH64_GISEL_AARCH64GLOBALISELUTILS_H
+#define LLVM_LIB_TARGET_AARCH64_GISEL_AARCH64GLOBALISELUTILS_H
+
+#include <cstdint>
+
+namespace llvm {
+namespace AArch64GISelUtils {
+
+/// \returns true if \p C is a legal immediate operand for an arithmetic
+/// instruction.
+constexpr bool isLegalArithImmed(const uint64_t C) {
+  return (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
+}
+
+} // namespace AArch64GISelUtils
+} // namespace llvm
+
+#endif
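
A few hypothetical checks (not part of the patch) illustrating what the
predicate accepts, namely a 12-bit value, optionally shifted left by 12 bits:

    #include "AArch64GlobalISelUtils.h"
    using llvm::AArch64GISelUtils::isLegalArithImmed;

    static_assert(isLegalArithImmed(4095), "fits in 12 bits");
    static_assert(isLegalArithImmed(4096), "4096 == 1 << 12, shifted form");
    static_assert(!isLegalArithImmed(4097), "would need a MOV + register cmp");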

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 7aef76edb4a9..4da3f1299702 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -165,14 +165,9 @@ class AArch64InstructionSelector : public InstructionSelector {
                                  MachineIRBuilder &MIRBuilder) const;
 
   // Emit an integer compare between LHS and RHS, which checks for Predicate.
-  //
-  // This returns the produced compare instruction, and the predicate which
-  // was ultimately used in the compare. The predicate may differ from what
-  // is passed in \p Predicate due to optimization.
-  std::pair<MachineInstr *, CmpInst::Predicate>
-  emitIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
-                     MachineOperand &Predicate,
-                     MachineIRBuilder &MIRBuilder) const;
+  MachineInstr *emitIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
+                                   MachineOperand &Predicate,
+                                   MachineIRBuilder &MIRBuilder) const;
 
   /// Emit a floating point comparison between \p LHS and \p RHS.
   MachineInstr *emitFPCompare(Register LHS, Register RHS,
@@ -229,6 +224,8 @@ class AArch64InstructionSelector : public InstructionSelector {
                         MachineIRBuilder &MIRBuilder) const;
   MachineInstr *emitADDS(Register Dst, MachineOperand &LHS, MachineOperand &RHS,
                          MachineIRBuilder &MIRBuilder) const;
+  MachineInstr *emitSUBS(Register Dst, MachineOperand &LHS, MachineOperand &RHS,
+                         MachineIRBuilder &MIRBuilder) const;
   MachineInstr *emitCMN(MachineOperand &LHS, MachineOperand &RHS,
                         MachineIRBuilder &MIRBuilder) const;
   MachineInstr *emitTST(MachineOperand &LHS, MachineOperand &RHS,
@@ -382,13 +379,6 @@ class AArch64InstructionSelector : public InstructionSelector {
   MachineInstr *tryFoldIntegerCompare(MachineOperand &LHS, MachineOperand &RHS,
                                       MachineOperand &Predicate,
                                       MachineIRBuilder &MIRBuilder) const;
-  MachineInstr *tryOptArithImmedIntegerCompare(MachineOperand &LHS,
-                                               MachineOperand &RHS,
-                                               CmpInst::Predicate &Predicate,
-                                               MachineIRBuilder &MIB) const;
-  MachineInstr *tryOptArithShiftedCompare(MachineOperand &LHS,
-                                          MachineOperand &RHS,
-                                          MachineIRBuilder &MIB) const;
 
   /// Return true if \p MI is a load or store of \p NumBytes bytes.
   bool isLoadStoreOfNumBytes(const MachineInstr &MI, unsigned NumBytes) const;
@@ -1385,9 +1375,10 @@ bool AArch64InstructionSelector::selectCompareBranch(
 
   if (!VRegAndVal || VRegAndVal->Value != 0) {
     // If we can't select a CBZ then emit a cmp + Bcc.
-    MachineInstr *Cmp;
-    std::tie(Cmp, Pred) = emitIntegerCompare(
-        CCMI->getOperand(2), CCMI->getOperand(3), CCMI->getOperand(1), MIB);
+    auto Pred =
+        static_cast<CmpInst::Predicate>(CCMI->getOperand(1).getPredicate());
+    emitIntegerCompare(CCMI->getOperand(2), CCMI->getOperand(3),
+                       CCMI->getOperand(1), MIB);
     const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(Pred);
     MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC).addMBB(DestMBB);
     I.eraseFromParent();
@@ -2867,10 +2858,9 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
     }
 
     MachineIRBuilder MIRBuilder(I);
-    MachineInstr *Cmp;
-    CmpInst::Predicate Pred;
-    std::tie(Cmp, Pred) = emitIntegerCompare(I.getOperand(2), I.getOperand(3),
-                                             I.getOperand(1), MIRBuilder);
+    auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
+    emitIntegerCompare(I.getOperand(2), I.getOperand(3), I.getOperand(1),
+                       MIRBuilder);
     emitCSetForICMP(I.getOperand(0).getReg(), Pred, MIRBuilder);
     I.eraseFromParent();
     return true;
@@ -3845,7 +3835,7 @@ MachineInstr *AArch64InstructionSelector::emitBinOp(
   MachineRegisterInfo &MRI = MIRBuilder.getMF().getRegInfo();
   assert(LHS.isReg() && RHS.isReg() && "Expected register operands?");
   auto Ty = MRI.getType(LHS.getReg());
-  assert(Ty.isScalar() && "Expected a scalar?");
+  assert(!Ty.isVector() && "Expected a scalar or pointer?");
   unsigned Size = Ty.getSizeInBits();
   assert((Size == 32 || Size == 64) && "Expected a 32-bit or 64-bit type only");
   bool Is32Bit = Size == 32;
@@ -3881,6 +3871,17 @@ AArch64InstructionSelector::emitADDS(Register Dst, MachineOperand &LHS,
   return emitBinOp(OpcTable, Dst, LHS, RHS, MIRBuilder);
 }
 
+MachineInstr *
+AArch64InstructionSelector::emitSUBS(Register Dst, MachineOperand &LHS,
+                                     MachineOperand &RHS,
+                                     MachineIRBuilder &MIRBuilder) const {
+  const std::array<std::array<unsigned, 2>, 3> OpcTable{
+      {{AArch64::SUBSXri, AArch64::SUBSWri},
+       {AArch64::SUBSXrs, AArch64::SUBSWrs},
+       {AArch64::SUBSXrr, AArch64::SUBSWrr}}};
+  return emitBinOp(OpcTable, Dst, LHS, RHS, MIRBuilder);
+}
+
 MachineInstr *
 AArch64InstructionSelector::emitCMN(MachineOperand &LHS, MachineOperand &RHS,
                                     MachineIRBuilder &MIRBuilder) const {
@@ -3917,8 +3918,7 @@ AArch64InstructionSelector::emitTST(MachineOperand &LHS, MachineOperand &RHS,
   return emitInstr(OpcTable[2][Is32Bit], {Ty}, {LHS, RHS}, MIRBuilder);
 }
 
-std::pair<MachineInstr *, CmpInst::Predicate>
-AArch64InstructionSelector::emitIntegerCompare(
+MachineInstr *AArch64InstructionSelector::emitIntegerCompare(
     MachineOperand &LHS, MachineOperand &RHS, MachineOperand &Predicate,
     MachineIRBuilder &MIRBuilder) const {
   assert(LHS.isReg() && RHS.isReg() && "Expected LHS and RHS to be registers!");
@@ -3928,24 +3928,11 @@ AArch64InstructionSelector::emitIntegerCompare(
   assert(!CmpTy.isVector() && "Expected scalar or pointer");
   unsigned Size = CmpTy.getSizeInBits();
   assert((Size == 32 || Size == 64) && "Expected a 32-bit or 64-bit LHS/RHS?");
-  auto P = static_cast<CmpInst::Predicate>(Predicate.getPredicate());
-
   // Fold the compare into a cmn or tst if possible.
   if (auto FoldCmp = tryFoldIntegerCompare(LHS, RHS, Predicate, MIRBuilder))
-    return {FoldCmp, P};
-
-  // Compares need special handling for their shifted/immediate forms. We
-  // may be able to modify the predicate or an illegal constant to perform
-  // some folding.
-  if (auto ImmedCmp = tryOptArithImmedIntegerCompare(LHS, RHS, P, MIRBuilder))
-    return {ImmedCmp, P};
-  if (auto ShiftedCmp = tryOptArithShiftedCompare(LHS, RHS, MIRBuilder))
-    return {ShiftedCmp, P};
-  unsigned CmpOpc = Size == 32 ? AArch64::SUBSWrr : AArch64::SUBSXrr;
-  auto CmpMI = MIRBuilder.buildInstr(
-      CmpOpc, {MRI.cloneVirtualRegister(LHS.getReg())}, {LHS, RHS});
-  constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
-  return {&*CmpMI, P};
+    return FoldCmp;
+  auto Dst = MRI.cloneVirtualRegister(LHS.getReg());
+  return emitSUBS(Dst, LHS, RHS, MIRBuilder);
 }
 
 MachineInstr *AArch64InstructionSelector::emitCSetForFCmp(
@@ -4177,19 +4164,14 @@ bool AArch64InstructionSelector::tryOptSelect(MachineInstr &I) const {
 
   AArch64CC::CondCode CondCode;
   if (CondOpc == TargetOpcode::G_ICMP) {
-    MachineInstr *Cmp;
-    CmpInst::Predicate Pred;
-
-    std::tie(Cmp, Pred) =
-        emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3),
-                           CondDef->getOperand(1), MIB);
-
-    // Have to collect the CondCode after emitIntegerCompare, since it can
-    // update the predicate.
+    auto Pred =
+        static_cast<CmpInst::Predicate>(CondDef->getOperand(1).getPredicate());
     CondCode = changeICMPPredToAArch64CC(Pred);
+    emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3),
+                       CondDef->getOperand(1), MIB);
   } else {
     // Get the condition code for the select.
-    CmpInst::Predicate Pred =
+    auto Pred =
         static_cast<CmpInst::Predicate>(CondDef->getOperand(1).getPredicate());
     AArch64CC::CondCode CondCode2;
     changeFCMPPredToAArch64CC(Pred, CondCode, CondCode2);
@@ -4336,143 +4318,6 @@ MachineInstr *AArch64InstructionSelector::tryFoldIntegerCompare(
   return nullptr;
 }
 
-MachineInstr *AArch64InstructionSelector::tryOptArithImmedIntegerCompare(
-    MachineOperand &LHS, MachineOperand &RHS, CmpInst::Predicate &P,
-    MachineIRBuilder &MIB) const {
-  // Attempt to select the immediate form of an integer compare.
-  MachineRegisterInfo &MRI = *MIB.getMRI();
-  auto Ty = MRI.getType(LHS.getReg());
-  assert(!Ty.isVector() && "Expected scalar or pointer only?");
-  unsigned Size = Ty.getSizeInBits();
-  assert((Size == 32 || Size == 64) &&
-         "Expected 32 bit or 64 bit compare only?");
-
-  // Check if this is a case we can already handle.
-  InstructionSelector::ComplexRendererFns ImmFns;
-  ImmFns = selectArithImmed(RHS);
-
-  if (!ImmFns) {
-    // We didn't get a rendering function, but we may still have a constant.
-    auto MaybeImmed = getImmedFromMO(RHS);
-    if (!MaybeImmed)
-      return nullptr;
-
-    // We have a constant, but it doesn't fit. Try adjusting it by one and
-    // updating the predicate if possible.
-    uint64_t C = *MaybeImmed;
-    CmpInst::Predicate NewP;
-    switch (P) {
-    default:
-      return nullptr;
-    case CmpInst::ICMP_SLT:
-    case CmpInst::ICMP_SGE:
-      // Check for
-      //
-      // x slt c => x sle c - 1
-      // x sge c => x sgt c - 1
-      //
-      // When c is not the smallest possible negative number.
-      if ((Size == 64 && static_cast<int64_t>(C) == INT64_MIN) ||
-          (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))
-        return nullptr;
-      NewP = (P == CmpInst::ICMP_SLT) ? CmpInst::ICMP_SLE : CmpInst::ICMP_SGT;
-      C -= 1;
-      break;
-    case CmpInst::ICMP_ULT:
-    case CmpInst::ICMP_UGE:
-      // Check for
-      //
-      // x ult c => x ule c - 1
-      // x uge c => x ugt c - 1
-      //
-      // When c is not zero.
-      if (C == 0)
-        return nullptr;
-      NewP = (P == CmpInst::ICMP_ULT) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
-      C -= 1;
-      break;
-    case CmpInst::ICMP_SLE:
-    case CmpInst::ICMP_SGT:
-      // Check for
-      //
-      // x sle c => x slt c + 1
-      // x sgt c => s sge c + 1
-      //
-      // When c is not the largest possible signed integer.
-      if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||
-          (Size == 64 && static_cast<int64_t>(C) == INT64_MAX))
-        return nullptr;
-      NewP = (P == CmpInst::ICMP_SLE) ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE;
-      C += 1;
-      break;
-    case CmpInst::ICMP_ULE:
-    case CmpInst::ICMP_UGT:
-      // Check for
-      //
-      // x ule c => x ult c + 1
-      // x ugt c => s uge c + 1
-      //
-      // When c is not the largest possible unsigned integer.
-      if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
-          (Size == 64 && C == UINT64_MAX))
-        return nullptr;
-      NewP = (P == CmpInst::ICMP_ULE) ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
-      C += 1;
-      break;
-    }
-
-    // Check if the new constant is valid.
-    if (Size == 32)
-      C = static_cast<uint32_t>(C);
-    ImmFns = select12BitValueWithLeftShift(C);
-    if (!ImmFns)
-      return nullptr;
-    P = NewP;
-  }
-
-  // At this point, we know we can select an immediate form. Go ahead and do
-  // that.
-  unsigned Opc;
-  if (Size == 32) {
-    Opc = AArch64::SUBSWri;
-  } else {
-    Opc = AArch64::SUBSXri;
-  }
-
-  auto CmpMI = MIB.buildInstr(Opc, {Ty}, {LHS.getReg()});
-  for (auto &RenderFn : *ImmFns)
-    RenderFn(CmpMI);
-  constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
-  return &*CmpMI;
-}
-
-MachineInstr *AArch64InstructionSelector::tryOptArithShiftedCompare(
-    MachineOperand &LHS, MachineOperand &RHS, MachineIRBuilder &MIB) const {
-  // We are looking for the following pattern:
-  //
-  // shift = G_SHL/ASHR/LHSR y, c
-  // ...
-  // cmp = G_ICMP pred, something, shift
-  //
-  // Since we will select the G_ICMP to a SUBS, we can potentially fold the
-  // shift into the subtract.
-  static const unsigned OpcTable[2] = {AArch64::SUBSWrs, AArch64::SUBSXrs};
-  auto ImmFns = selectShiftedRegister(RHS);
-  if (!ImmFns)
-    return nullptr;
-  MachineRegisterInfo &MRI = *MIB.getMRI();
-  auto Ty = MRI.getType(LHS.getReg());
-  assert(!Ty.isVector() && "Expected scalar or pointer only?");
-  unsigned Size = Ty.getSizeInBits();
-  bool Idx = (Size == 64);
-  unsigned Opc = OpcTable[Idx];
-  auto CmpMI = MIB.buildInstr(Opc, {Ty}, {LHS.getReg()});
-  for (auto &RenderFn : *ImmFns)
-    RenderFn(CmpMI);
-  constrainSelectedInstRegOperands(*CmpMI, TII, TRI, RBI);
-  return &*CmpMI;
-}
-
 bool AArch64InstructionSelector::tryOptShuffleDupLane(
     MachineInstr &I, LLT DstTy, LLT SrcTy, ArrayRef<int> Mask,
     MachineRegisterInfo &MRI) const {

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index 61a03f6052ae..9b433e0e90c6 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -20,6 +20,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "AArch64TargetMachine.h"
+#include "AArch64GlobalISelUtils.h"
 #include "MCTargetDesc/AArch64MCTargetDesc.h"
 #include "llvm/CodeGen/GlobalISel/Combiner.h"
 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
@@ -39,6 +40,7 @@
 
 using namespace llvm;
 using namespace MIPatternMatch;
+using namespace AArch64GISelUtils;
 
 /// Represents a pseudo instruction which replaces a G_SHUFFLE_VECTOR.
 ///
@@ -416,6 +418,138 @@ static bool applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
   return true;
 }
 
+/// Determine if it is possible to modify the \p RHS and predicate \p P of a
+/// G_ICMP instruction such that the right-hand side is an arithmetic immediate.
+///
+/// \returns A pair containing the updated immediate and predicate which may
+/// be used to optimize the instruction.
+///
+/// \note This assumes that the comparison has been legalized.
+Optional<std::pair<uint64_t, CmpInst::Predicate>>
+tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
+                        const MachineRegisterInfo &MRI) {
+  const auto &Ty = MRI.getType(RHS);
+  if (Ty.isVector())
+    return None;
+  unsigned Size = Ty.getSizeInBits();
+  assert((Size == 32 || Size == 64) && "Expected 32 or 64 bit compare only?");
+
+  // If the RHS is not a constant, or the RHS is already a valid arithmetic
+  // immediate, then there is nothing to change.
+  auto ValAndVReg = getConstantVRegValWithLookThrough(RHS, MRI);
+  if (!ValAndVReg)
+    return None;
+  uint64_t C = ValAndVReg->Value;
+  if (isLegalArithImmed(C))
+    return None;
+
+  // We have a non-arithmetic immediate. Check if adjusting the immediate and
+  // adjusting the predicate will result in a legal arithmetic immediate.
+  switch (P) {
+  default:
+    return None;
+  case CmpInst::ICMP_SLT:
+  case CmpInst::ICMP_SGE:
+    // Check for
+    //
+    // x slt c => x sle c - 1
+    // x sge c => x sgt c - 1
+    //
+    // When c is not the smallest possible negative number.
+    if ((Size == 64 && static_cast<int64_t>(C) == INT64_MIN) ||
+        (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))
+      return None;
+    P = (P == CmpInst::ICMP_SLT) ? CmpInst::ICMP_SLE : CmpInst::ICMP_SGT;
+    C -= 1;
+    break;
+  case CmpInst::ICMP_ULT:
+  case CmpInst::ICMP_UGE:
+    // Check for
+    //
+    // x ult c => x ule c - 1
+    // x uge c => x ugt c - 1
+    //
+    // When c is not zero.
+    if (C == 0)
+      return None;
+    P = (P == CmpInst::ICMP_ULT) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
+    C -= 1;
+    break;
+  case CmpInst::ICMP_SLE:
+  case CmpInst::ICMP_SGT:
+    // Check for
+    //
+    // x sle c => x slt c + 1
+  // x sgt c => x sge c + 1
+    //
+    // When c is not the largest possible signed integer.
+    if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||
+        (Size == 64 && static_cast<int64_t>(C) == INT64_MAX))
+      return None;
+    P = (P == CmpInst::ICMP_SLE) ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE;
+    C += 1;
+    break;
+  case CmpInst::ICMP_ULE:
+  case CmpInst::ICMP_UGT:
+    // Check for
+    //
+    // x ule c => x ult c + 1
+  // x ugt c => x uge c + 1
+    //
+    // When c is not the largest possible unsigned integer.
+    if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
+        (Size == 64 && C == UINT64_MAX))
+      return None;
+    P = (P == CmpInst::ICMP_ULE) ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
+    C += 1;
+    break;
+  }
+
+  // Check if the new constant is valid, and return the updated constant and
+  // predicate if it is.
+  if (Size == 32)
+    C = static_cast<uint32_t>(C);
+  if (!isLegalArithImmed(C))
+    return None;
+  return {{C, P}};
+}
+
+/// Determine whether or not it is possible to update the RHS and predicate of
+/// a G_ICMP instruction such that the RHS will be selected as an arithmetic
+/// immediate.
+///
+/// \p MI - The G_ICMP instruction
+/// \p MatchInfo - The new RHS immediate and predicate on success
+///
+/// See tryAdjustICmpImmAndPred for valid transformations.
+bool matchAdjustICmpImmAndPred(
+    MachineInstr &MI, const MachineRegisterInfo &MRI,
+    std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
+  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
+  Register RHS = MI.getOperand(3).getReg();
+  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
+  if (auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(RHS, Pred, MRI)) {
+    MatchInfo = *MaybeNewImmAndPred;
+    return true;
+  }
+  return false;
+}
+
+bool applyAdjustICmpImmAndPred(
+    MachineInstr &MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
+    MachineIRBuilder &MIB, GISelChangeObserver &Observer) {
+  MIB.setInstrAndDebugLoc(MI);
+  MachineOperand &RHS = MI.getOperand(3);
+  MachineRegisterInfo &MRI = *MIB.getMRI();
+  auto Cst = MIB.buildConstant(MRI.cloneVirtualRegister(RHS.getReg()),
+                               MatchInfo.first);
+  Observer.changingInstr(MI);
+  RHS.setReg(Cst->getOperand(0).getReg());
+  MI.getOperand(1).setPredicate(MatchInfo.second);
+  Observer.changedInstr(MI);
+  return true;
+}
+
 #define AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS
 #include "AArch64GenPostLegalizeGILowering.inc"
 #undef AARCH64POSTLEGALIZERLOWERINGHELPER_GENCOMBINERHELPER_DEPS
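
For reference, a minimal standalone sketch of the adjustment
tryAdjustICmpImmAndPred performs, specialized to the unsigned less-than case
(a 32-bit compare; the names here are illustrative, not from the patch):

    #include <cstdint>
    #include <optional>
    #include <utility>

    constexpr bool isLegalArithImmed(uint64_t C) {
      return (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
    }

    // x ult c <=> x ule (c - 1), valid whenever c != 0. Adjusting is only
    // worthwhile when the original constant does not encode but the
    // adjusted one does.
    std::optional<std::pair<uint64_t, const char *>> adjustUlt(uint32_t C) {
      if (C == 0 || isLegalArithImmed(C))
        return std::nullopt;
      uint64_t NewC = C - 1;
      if (!isLegalArithImmed(NewC))
        return std::nullopt;
      return std::pair<uint64_t, const char *>{NewC, "ule"};
    }

    // adjustUlt(4097) yields {4096, "ule"}; adjustUlt(4095) yields nullopt,
    // since 4095 is already a legal arithmetic immediate.

Note that applyAdjustICmpImmAndPred materializes a fresh G_CONSTANT for the
adjusted value rather than mutating the existing one, presumably because the
original constant may have other users; a now-dead constant is left for later
cleanup.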

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
index 9ccb42010736..adc266c6394e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
@@ -67,37 +67,6 @@ body:             |
 
 ...
 ---
-name:            check_update_predicate
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $w0, $w1
-    ; The G_ICMP is optimized here to be a slt comparison by adding 1 to the
-    ; constant. So, the CSELWr should use the predicate code 11, rather than
-    ; 13.
-
-    ; CHECK-LABEL: name: check_update_predicate
-    ; CHECK: liveins: $w0, $w1
-    ; CHECK: %copy1:gpr32sp = COPY $w0
-    ; CHECK: %copy2:gpr32 = COPY $w1
-    ; CHECK: %cst:gpr32 = MOVi32imm -1
-    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %copy1, 0, 0, implicit-def $nzcv
-    ; CHECK: %select:gpr32 = CSELWr %cst, %copy2, 11, implicit $nzcv
-    ; CHECK: $w0 = COPY %select
-    ; CHECK: RET_ReallyLR implicit $w0
-    %copy1:gpr(s32) = COPY $w0
-    %copy2:gpr(s32) = COPY $w1
-    %cst:gpr(s32) = G_CONSTANT i32 -1
-    %cmp:gpr(s32) = G_ICMP intpred(sle), %copy1(s32), %cst
-    %trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    %select:gpr(s32) = G_SELECT %trunc(s1), %cst, %copy2
-    $w0 = COPY %select(s32)
-    RET_ReallyLR implicit $w0
-...
----
 name:            csinc
 alignment:       4
 legalized:       true

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
new file mode 100644
index 000000000000..3d1d3af7221f
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
@@ -0,0 +1,679 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=LOWER
+# RUN: llc -mtriple=aarch64 -global-isel -start-before=aarch64-postlegalizer-lowering -stop-after=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=SELECT
+
+# Verify that we will adjust the constant + predicate of a compare when it will
+# allow us to fold an immediate into a compare.
+...
+---
+name:            slt_to_sle_s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0
+
+    ; x slt c => x sle c - 1
+    ;
+    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: slt_to_sle_s32
+    ; LOWER: liveins: $w0
+    ; LOWER: %reg:_(s32) = COPY $w0
+    ; LOWER: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4096
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s32), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: slt_to_sle_s32
+    ; SELECT: liveins: $w0
+    ; SELECT: %reg:gpr32sp = COPY $w0
+    ; SELECT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg, 1, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 4097
+    %cmp:_(s32) = G_ICMP intpred(slt), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            slt_to_sle_s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0
+
+    ; x slt c => x sle c - 1
+    ;
+    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: slt_to_sle_s64
+    ; LOWER: liveins: $x0
+    ; LOWER: %reg:_(s64) = COPY $x0
+    ; LOWER: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4096
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s64), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: slt_to_sle_s64
+    ; SELECT: liveins: $x0
+    ; SELECT: %reg:gpr64sp = COPY $x0
+    ; SELECT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %reg, 1, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 4097
+    %cmp:_(s32) = G_ICMP intpred(slt), %reg(s64), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            sge_to_sgt_s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0
+
+    ; x sge c => x sgt c - 1
+    ;
+    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: sge_to_sgt_s32
+    ; LOWER: liveins: $w0
+    ; LOWER: %reg:_(s32) = COPY $w0
+    ; LOWER: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4096
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(sgt), %reg(s32), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: sge_to_sgt_s32
+    ; SELECT: liveins: $w0
+    ; SELECT: %reg:gpr32sp = COPY $w0
+    ; SELECT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg, 1, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 4097
+    %cmp:_(s32) = G_ICMP intpred(sge), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            sge_to_sgt_s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0
+
+    ; x sge c => x sgt c - 1
+    ;
+    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: sge_to_sgt_s64
+    ; LOWER: liveins: $x0
+    ; LOWER: %reg:_(s64) = COPY $x0
+    ; LOWER: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4096
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(sgt), %reg(s64), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: sge_to_sgt_s64
+    ; SELECT: liveins: $x0
+    ; SELECT: %reg:gpr64sp = COPY $x0
+    ; SELECT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %reg, 1, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 4097
+    %cmp:_(s32) = G_ICMP intpred(sge), %reg(s64), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            ult_to_ule_s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0
+
+    ; x ult c => x ule c - 1
+    ;
+    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: ult_to_ule_s32
+    ; LOWER: liveins: $w0
+    ; LOWER: %reg:_(s32) = COPY $w0
+    ; LOWER: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4096
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(ule), %reg(s32), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: ult_to_ule_s32
+    ; SELECT: liveins: $w0
+    ; SELECT: %reg:gpr32sp = COPY $w0
+    ; SELECT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg, 1, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 4097
+    %cmp:_(s32) = G_ICMP intpred(ult), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            ult_to_ule_s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0
+
+    ; x ult c => x ule c - 1
+    ;
+    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: ult_to_ule_s64
+    ; LOWER: liveins: $x0
+    ; LOWER: %reg:_(s64) = COPY $x0
+    ; LOWER: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4096
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(ule), %reg(s64), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: ult_to_ule_s64
+    ; SELECT: liveins: $x0
+    ; SELECT: %reg:gpr64sp = COPY $x0
+    ; SELECT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %reg, 1, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 4097
+    %cmp:_(s32) = G_ICMP intpred(ult), %reg(s64), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            uge_to_ugt_s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0
+
+    ; x uge c => x ugt c - 1
+    ;
+    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: uge_to_ugt_s32
+    ; LOWER: liveins: $w0
+    ; LOWER: %reg:_(s32) = COPY $w0
+    ; LOWER: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4096
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(ugt), %reg(s32), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: uge_to_ugt_s32
+    ; SELECT: liveins: $w0
+    ; SELECT: %reg:gpr32sp = COPY $w0
+    ; SELECT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg, 1, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 4097
+    %cmp:_(s32) = G_ICMP intpred(uge), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            uge_to_ugt_s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0
+
+    ; x uge c => x ugt c - 1
+    ;
+    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: uge_to_ugt_s64
+    ; LOWER: liveins: $x0
+    ; LOWER: %reg:_(s64) = COPY $x0
+    ; LOWER: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4096
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(ugt), %reg(s64), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: uge_to_ugt_s64
+    ; SELECT: liveins: $x0
+    ; SELECT: %reg:gpr64sp = COPY $x0
+    ; SELECT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %reg, 1, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 4097
+    %cmp:_(s32) = G_ICMP intpred(uge), %reg(s64), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            sle_to_slt_s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0
+
+    ; x sle c => x slt c + 1
+    ;
+    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
+    ; left shift.
+    ;
+    ; (We can't use 4095 here, because that's a legal arithmetic immediate.)
+
+    ; LOWER-LABEL: name: sle_to_slt_s32
+    ; LOWER: liveins: $w0
+    ; LOWER: %reg:_(s32) = COPY $w0
+    ; LOWER: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8192
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s32), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: sle_to_slt_s32
+    ; SELECT: liveins: $w0
+    ; SELECT: %reg:gpr32sp = COPY $w0
+    ; SELECT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg, 2, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 8191
+    %cmp:_(s32) = G_ICMP intpred(sle), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            sle_to_slt_s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0
+
+    ; x sle c => x slt c + 1
+    ;
+    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: sle_to_slt_s64
+    ; LOWER: liveins: $x0
+    ; LOWER: %reg:_(s64) = COPY $x0
+    ; LOWER: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8192
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s64), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: sle_to_slt_s64
+    ; SELECT: liveins: $x0
+    ; SELECT: %reg:gpr64sp = COPY $x0
+    ; SELECT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %reg, 2, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 8191
+    %cmp:_(s32) = G_ICMP intpred(sle), %reg(s64), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            sgt_to_sge_s32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0
+
+    ; x sgt c => x sge c + 1
+    ;
+    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: sgt_to_sge_s32
+    ; LOWER: liveins: $w0
+    ; LOWER: %reg:_(s32) = COPY $w0
+    ; LOWER: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8192
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(sge), %reg(s32), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: sgt_to_sge_s32
+    ; SELECT: liveins: $w0
+    ; SELECT: %reg:gpr32sp = COPY $w0
+    ; SELECT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg, 2, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 8191
+    %cmp:_(s32) = G_ICMP intpred(sgt), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            sgt_to_sge_s64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0
+
+    ; x sgt c => x sge c + 1
+    ;
+    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
+    ; left shift.
+
+    ; LOWER-LABEL: name: sgt_to_sge_s64
+    ; LOWER: liveins: $x0
+    ; LOWER: %reg:_(s64) = COPY $x0
+    ; LOWER: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8192
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(sge), %reg(s64), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: sgt_to_sge_s64
+    ; SELECT: liveins: $x0
+    ; SELECT: %reg:gpr64sp = COPY $x0
+    ; SELECT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %reg, 2, 12, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 8191
+    %cmp:_(s32) = G_ICMP intpred(sgt), %reg(s64), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            no_opt_int32_min
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0
+
+    ; The cmp should not change.
+    ;
+    ; If we subtract 1 from the constant, it will wrap around, and so it's not
+    ; true that
+    ;
+    ; x slt c => x sle c - 1
+    ; x sge c => x sgt c - 1
+
+    ; LOWER-LABEL: name: no_opt_int32_min
+    ; LOWER: liveins: $w0
+    ; LOWER: %reg:_(s32) = COPY $w0
+    ; LOWER: %cst:_(s32) = G_CONSTANT i32 -2147483648
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s32), %cst
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: no_opt_int32_min
+    ; SELECT: liveins: $w0
+    ; SELECT: %reg:gpr32 = COPY $w0
+    ; SELECT: %cst:gpr32 = MOVi32imm -2147483648
+    ; SELECT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %reg, %cst, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 -2147483648
+    %cmp:_(s32) = G_ICMP intpred(slt), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            no_opt_int64_min
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0
+
+    ; The cmp should not change.
+    ;
+    ; If we subtract 1 from the constant, it will wrap around, and so it's not
+    ; true that
+    ;
+    ; x slt c => x sle c - 1
+    ; x sge c => x sgt c - 1
+
+    ; LOWER-LABEL: name: no_opt_int64_min
+    ; LOWER: liveins: $x0
+    ; LOWER: %reg:_(s64) = COPY $x0
+    ; LOWER: %cst:_(s64) = G_CONSTANT i64 -9223372036854775808
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(slt), %reg(s64), %cst
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: no_opt_int64_min
+    ; SELECT: liveins: $x0
+    ; SELECT: %reg:gpr64 = COPY $x0
+    ; SELECT: %cst:gpr64 = MOVi64imm -9223372036854775808
+    ; SELECT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %reg, %cst, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 -9223372036854775808
+    %cmp:_(s32) = G_ICMP intpred(slt), %reg(s64), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            no_opt_int32_max
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0
+
+    ; The cmp should not change.
+    ;
+    ; If we add 1 to the constant, it will wrap around, and so it's not true
+    ; that
+    ;
+    ; x sle c => x slt c + 1
+    ; x sgt c => x sge c + 1
+
+    ; LOWER-LABEL: name: no_opt_int32_max
+    ; LOWER: liveins: $w0
+    ; LOWER: %reg:_(s32) = COPY $w0
+    ; LOWER: %cst:_(s32) = G_CONSTANT i32 2147483647
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s32), %cst
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: no_opt_int32_max
+    ; SELECT: liveins: $w0
+    ; SELECT: %reg:gpr32 = COPY $w0
+    ; SELECT: %cst:gpr32 = MOVi32imm 2147483647
+    ; SELECT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %reg, %cst, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s32) = COPY $w0
+    %cst:_(s32) = G_CONSTANT i32 2147483647
+    %cmp:_(s32) = G_ICMP intpred(sle), %reg(s32), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            no_opt_int64_max
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0
+
+    ; The cmp should not change.
+    ;
+    ; If we add 1 to the constant, it will wrap around, and so it's not true
+    ; that
+    ;
+    ; x sle c => x slt c + 1
+    ; x sgt c => x sge c + 1
+
+
+    ; LOWER-LABEL: name: no_opt_int64_max
+    ; LOWER: liveins: $x0
+    ; LOWER: %reg:_(s64) = COPY $x0
+    ; LOWER: %cst:_(s64) = G_CONSTANT i64 9223372036854775807
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(sle), %reg(s64), %cst
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: no_opt_int64_max
+    ; SELECT: liveins: $x0
+    ; SELECT: %reg:gpr64 = COPY $x0
+    ; SELECT: %cst:gpr64 = MOVi64imm 9223372036854775807
+    ; SELECT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %reg, %cst, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 9223372036854775807
+    %cmp:_(s32) = G_ICMP intpred(sle), %reg(s64), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            no_opt_zero
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0
+
+    ; The cmp should not change during the lowering pass.
+    ;
+    ; This is an unsigned comparison, so when the constant is 0, the following
+    ; does not hold:
+    ;
+    ; x ult c => x ule c - 1
+    ; x uge c => x ugt c - 1
+
+    ; LOWER-LABEL: name: no_opt_zero
+    ; LOWER: liveins: $x0
+    ; LOWER: %reg:_(s64) = COPY $x0
+    ; LOWER: %cst:_(s64) = G_CONSTANT i64 0
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(ult), %reg(s64), %cst
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: no_opt_zero
+    ; SELECT: liveins: $x0
+    ; SELECT: %reg:gpr64sp = COPY $x0
+    ; SELECT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %reg, 0, 0, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg:_(s64) = COPY $x0
+    %cst:_(s64) = G_CONSTANT i64 0
+    %cmp:_(s32) = G_ICMP intpred(ult), %reg(s64), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:            cmp_and_select
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $w0, $w1
+    ; The G_ICMP is optimized here to be a slt comparison by adding 1 to the
+    ; constant. So, the CSELWr should use the predicate code 11, rather than
+    ; 13.
+
+    ; LOWER-LABEL: name: cmp_and_select
+    ; LOWER: liveins: $w0, $w1
+    ; LOWER: %reg0:_(s32) = COPY $w0
+    ; LOWER: %reg1:_(s32) = COPY $w1
+    ; LOWER: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; LOWER: %cmp:_(s32) = G_ICMP intpred(slt), %reg0(s32), [[C]]
+    ; LOWER: %trunc:_(s1) = G_TRUNC %cmp(s32)
+    ; LOWER: %select:_(s32) = G_SELECT %trunc(s1), %reg0, %reg1
+    ; LOWER: $w0 = COPY %select(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: cmp_and_select
+    ; SELECT: liveins: $w0, $w1
+    ; SELECT: %reg0:gpr32common = COPY $w0
+    ; SELECT: %reg1:gpr32 = COPY $w1
+    ; SELECT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg0, 0, 0, implicit-def $nzcv
+    ; SELECT: %select:gpr32 = CSELWr %reg0, %reg1, 11, implicit $nzcv
+    ; SELECT: $w0 = COPY %select
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg0:_(s32) = COPY $w0
+    %reg1:_(s32) = COPY $w1
+    %cst:_(s32) = G_CONSTANT i32 -1
+    %cmp:_(s32) = G_ICMP intpred(sle), %reg0(s32), %cst
+    %trunc:_(s1) = G_TRUNC %cmp(s32)
+    %select:_(s32) = G_SELECT %trunc(s1), %reg0, %reg1
+    $w0 = COPY %select(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name:             andsxri
+legalized: true
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $x0
+    ; Show that we can select a tst/ands by optimizing the G_ICMP in the
+    ; lowering phase.
+
+    ; LOWER-LABEL: name: andsxri
+    ; LOWER: liveins: $x0
+    ; LOWER: %reg0:gpr(s64) = COPY $x0
+    ; LOWER: %bit:gpr(s64) = G_CONSTANT i64 8
+    ; LOWER: %and:gpr(s64) = G_AND %reg0, %bit
+    ; LOWER: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 0
+    ; LOWER: %cmp:gpr(s32) = G_ICMP intpred(sge), %and(s64), [[C]]
+    ; LOWER: $w0 = COPY %cmp(s32)
+    ; LOWER: RET_ReallyLR implicit $w0
+    ; SELECT-LABEL: name: andsxri
+    ; SELECT: liveins: $x0
+    ; SELECT: %reg0:gpr64 = COPY $x0
+    ; SELECT: [[ANDSXri:%[0-9]+]]:gpr64 = ANDSXri %reg0, 8000, implicit-def $nzcv
+    ; SELECT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
+    ; SELECT: $w0 = COPY %cmp
+    ; SELECT: RET_ReallyLR implicit $w0
+    %reg0:gpr(s64) = COPY $x0
+    %bit:gpr(s64) = G_CONSTANT i64 8
+    %and:gpr(s64) = G_AND %reg0, %bit
+    %cst:gpr(s64) = G_CONSTANT i64 -1
+    %cmp:gpr(s32) = G_ICMP intpred(sgt), %and(s64), %cst
+    $w0 = COPY %cmp(s32)
+    RET_ReallyLR implicit $w0

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-arith-immed-compare.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-arith-immed-compare.mir
deleted file mode 100644
index 605e56e349ee..000000000000
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-arith-immed-compare.mir
+++ /dev/null
@@ -1,708 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
-
-name:            slt_to_sle_s32
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $w0
-
-    ; x slt c => x sle c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
-    ;
-    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: slt_to_sle_s32
-    ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
-    ; CHECK: SUBSWri [[COPY]], 1, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
-    ; CHECK: $w0 = COPY [[ANDWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
-    %0:gpr(s32) = COPY $w0
-    %1:gpr(s32) = G_CONSTANT i32 4097
-    %4:gpr(s32) = G_ICMP intpred(slt), %0(s32), %1
-    %5:gpr(s32) = G_CONSTANT i32 1
-    %3:gpr(s32) = G_AND %4, %5
-    $w0 = COPY %3(s32)
-    RET_ReallyLR implicit $w0
-
-...
----
-name:            slt_to_sle_s64
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0
-
-    ; x slt c => x sle c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
-    ;
-    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: slt_to_sle_s64
-    ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
-    %0:gpr(s64) = COPY $x0
-    %1:gpr(s64) = G_CONSTANT i64 4097
-    %4:gpr(s32) = G_ICMP intpred(slt), %0(s64), %1
-    %6:gpr(s64) = G_ANYEXT %4(s32)
-    %5:gpr(s64) = G_CONSTANT i64 1
-    %3:gpr(s64) = G_AND %6, %5
-    $x0 = COPY %3(s64)
-    RET_ReallyLR implicit $x0
-
-...
----
-name:            sge_to_sgt_s32
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $w0
-
-    ; x sge c => x sgt c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
-    ;
-    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: sge_to_sgt_s32
-    ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
-    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
-    ; CHECK: $w0 = COPY [[ANDWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
-    %0:gpr(s32) = COPY $w0
-    %1:gpr(s32) = G_CONSTANT i32 4097
-    %4:gpr(s32) = G_ICMP intpred(sge), %0(s32), %1
-    %5:gpr(s32) = G_CONSTANT i32 1
-    %3:gpr(s32) = G_AND %4, %5
-    $w0 = COPY %3(s32)
-    RET_ReallyLR implicit $w0
-
-...
----
-name:            sge_to_sgt_s64
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0
-
-    ; x sge c => x sgt c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
-    ;
-    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: sge_to_sgt_s64
-    ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
-    %0:gpr(s64) = COPY $x0
-    %1:gpr(s64) = G_CONSTANT i64 4097
-    %4:gpr(s32) = G_ICMP intpred(sge), %0(s64), %1
-    %6:gpr(s64) = G_ANYEXT %4(s32)
-    %5:gpr(s64) = G_CONSTANT i64 1
-    %3:gpr(s64) = G_AND %6, %5
-    $x0 = COPY %3(s64)
-    RET_ReallyLR implicit $x0
-
-...
----
-name:            ult_to_ule_s32
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $w0
-
-    ; x ult c => x ule c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
-    ;
-    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: ult_to_ule_s32
-    ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
-    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
-    ; CHECK: $w0 = COPY [[ANDWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
-    %0:gpr(s32) = COPY $w0
-    %1:gpr(s32) = G_CONSTANT i32 4097
-    %4:gpr(s32) = G_ICMP intpred(ult), %0(s32), %1
-    %5:gpr(s32) = G_CONSTANT i32 1
-    %3:gpr(s32) = G_AND %4, %5
-    $w0 = COPY %3(s32)
-    RET_ReallyLR implicit $w0
-
-...
----
-name:            ult_to_ule_s64
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0
-
-    ; x ult c => x ule c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
-    ;
-    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: ult_to_ule_s64
-    ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
-    %0:gpr(s64) = COPY $x0
-    %1:gpr(s64) = G_CONSTANT i64 4097
-    %4:gpr(s32) = G_ICMP intpred(ult), %0(s64), %1
-    %6:gpr(s64) = G_ANYEXT %4(s32)
-    %5:gpr(s64) = G_CONSTANT i64 1
-    %3:gpr(s64) = G_AND %6, %5
-    $x0 = COPY %3(s64)
-    RET_ReallyLR implicit $x0
-
-...
----
-name:            uge_to_ugt_s32
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $w0
-
-    ; x uge c => x ugt c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
-    ;
-    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: uge_to_ugt_s32
-    ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
-    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 1, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
-    ; CHECK: $w0 = COPY [[ANDWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
-    %0:gpr(s32) = COPY $w0
-    %1:gpr(s32) = G_CONSTANT i32 4097
-    %4:gpr(s32) = G_ICMP intpred(uge), %0(s32), %1
-    %5:gpr(s32) = G_CONSTANT i32 1
-    %3:gpr(s32) = G_AND %4, %5
-    $w0 = COPY %3(s32)
-    RET_ReallyLR implicit $w0
-
-...
----
-name:            uge_to_ugt_s64
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0
-
-    ; x uge c => x ugt c - 1
-    ;
-    ; We should not have a MOV here. We can subtract 1 from the constant and
-    ; change the condition code.
-    ;
-    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: uge_to_ugt_s64
-    ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
-    %0:gpr(s64) = COPY $x0
-    %1:gpr(s64) = G_CONSTANT i64 4097
-    %4:gpr(s32) = G_ICMP intpred(uge), %0(s64), %1
-    %6:gpr(s64) = G_ANYEXT %4(s32)
-    %5:gpr(s64) = G_CONSTANT i64 1
-    %3:gpr(s64) = G_AND %6, %5
-    $x0 = COPY %3(s64)
-    RET_ReallyLR implicit $x0
-
-...
----
-name:            sle_to_slt_s32
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $w0
-
-    ; x sle c => x slt c + 1
-    ;
-    ; We should not have a MOV here. We can add 1 to the constant and change
-    ; the condition code.
-    ;
-    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
-    ; left shift.
-    ;
-    ; (We can't use 4095 here, because that's a legal arithmetic immediate.)
-
-    ; CHECK-LABEL: name: sle_to_slt_s32
-    ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
-    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 2, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
-    ; CHECK: $w0 = COPY [[ANDWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
-    %0:gpr(s32) = COPY $w0
-    %1:gpr(s32) = G_CONSTANT i32 8191
-    %4:gpr(s32) = G_ICMP intpred(sle), %0(s32), %1
-    %5:gpr(s32) = G_CONSTANT i32 1
-    %3:gpr(s32) = G_AND %4, %5
-    $w0 = COPY %3(s32)
-    RET_ReallyLR implicit $w0
-
-...
----
-name:            sle_to_slt_s64
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0
-
-    ; x sle c => x slt c + 1
-    ;
-    ; We should not have a MOV here. We can add 1 to the constant and change
-    ; the condition code.
-    ;
-    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: sle_to_slt_s64
-    ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 2, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
-    %0:gpr(s64) = COPY $x0
-    %1:gpr(s64) = G_CONSTANT i64 8191
-    %4:gpr(s32) = G_ICMP intpred(sle), %0(s64), %1
-    %6:gpr(s64) = G_ANYEXT %4(s32)
-    %5:gpr(s64) = G_CONSTANT i64 1
-    %3:gpr(s64) = G_AND %6, %5
-    $x0 = COPY %3(s64)
-    RET_ReallyLR implicit $x0
-
-...
----
-name:            sgt_to_sge_s32
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $w0
-
-    ; x sgt c => x sge c + 1
-    ;
-    ; We should not have a MOV here. We can add 1 to the constant and change
-    ; the condition code.
-    ;
-    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: sgt_to_sge_s32
-    ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
-    ; CHECK: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 2, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
-    ; CHECK: $w0 = COPY [[ANDWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
-    %0:gpr(s32) = COPY $w0
-    %1:gpr(s32) = G_CONSTANT i32 8191
-    %4:gpr(s32) = G_ICMP intpred(sgt), %0(s32), %1
-    %5:gpr(s32) = G_CONSTANT i32 1
-    %3:gpr(s32) = G_AND %4, %5
-    $w0 = COPY %3(s32)
-    RET_ReallyLR implicit $w0
-
-...
----
-name:            sgt_to_sge_s64
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0
-
-    ; x sgt c => x sge c + 1
-    ;
-    ; We should not have a MOV here. We can add 1 to the constant and change
-    ; the condition code.
-    ;
-    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
-    ; left shift.
-
-    ; CHECK-LABEL: name: sgt_to_sge_s64
-    ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 2, 12, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
-    %0:gpr(s64) = COPY $x0
-    %1:gpr(s64) = G_CONSTANT i64 8191
-    %4:gpr(s32) = G_ICMP intpred(sgt), %0(s64), %1
-    %6:gpr(s64) = G_ANYEXT %4(s32)
-    %5:gpr(s64) = G_CONSTANT i64 1
-    %3:gpr(s64) = G_AND %6, %5
-    $x0 = COPY %3(s64)
-    RET_ReallyLR implicit $x0
-
-...
----
-name:            no_opt_int32_min
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $w0
-
-    ; This one should contain a MOV.
-    ;
-    ; If we subtract 1 from the constant, it will wrap around, and so it's not
-    ; true that
-    ;
-    ; x slt c => x sle c - 1
-    ; x sge c => x sgt c - 1
-
-    ; CHECK-LABEL: name: no_opt_int32_min
-    ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -2147483648
-    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
-    ; CHECK: $w0 = COPY [[ANDWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
-    %0:gpr(s32) = COPY $w0
-    %1:gpr(s32) = G_CONSTANT i32 -2147483648
-    %4:gpr(s32) = G_ICMP intpred(slt), %0(s32), %1
-    %5:gpr(s32) = G_CONSTANT i32 1
-    %3:gpr(s32) = G_AND %4, %5
-    $w0 = COPY %3(s32)
-    RET_ReallyLR implicit $w0
-
-...
----
-name:            no_opt_int64_min
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0
-
-    ; This one should contain a MOV.
-    ;
-    ; If we subtract 1 from the constant, it will wrap around, and so it's not
-    ; true that
-    ;
-    ; x slt c => x sle c - 1
-    ; x sge c => x sgt c - 1
-
-    ; CHECK-LABEL: name: no_opt_int64_min
-    ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm -9223372036854775808
-    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
-    %0:gpr(s64) = COPY $x0
-    %1:gpr(s64) = G_CONSTANT i64 -9223372036854775808
-    %4:gpr(s32) = G_ICMP intpred(slt), %0(s64), %1
-    %6:gpr(s64) = G_ANYEXT %4(s32)
-    %5:gpr(s64) = G_CONSTANT i64 1
-    %3:gpr(s64) = G_AND %6, %5
-    $x0 = COPY %3(s64)
-    RET_ReallyLR implicit $x0
-
-...
----
-name:            no_opt_int32_max
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $w0
-
-    ; This one should contain a MOV.
-    ;
-    ; If we add 1 to the constant, it will wrap around, and so it's not true
-    ; that
-    ;
-    ; x sle c => x slt c + 1
-    ; x sgt c => x sge c + 1
-
-    ; CHECK-LABEL: name: no_opt_int32_max
-    ; CHECK: liveins: $w0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 2147483647
-    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
-    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
-    ; CHECK: $w0 = COPY [[ANDWri]]
-    ; CHECK: RET_ReallyLR implicit $w0
-    %0:gpr(s32) = COPY $w0
-    %1:gpr(s32) = G_CONSTANT i32 2147483647
-    %4:gpr(s32) = G_ICMP intpred(sle), %0(s32), %1
-    %5:gpr(s32) = G_CONSTANT i32 1
-    %3:gpr(s32) = G_AND %4, %5
-    $w0 = COPY %3(s32)
-    RET_ReallyLR implicit $w0
-
-...
----
-name:            no_opt_int64_max
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0
-
-    ; This one should contain a MOV.
-    ;
-    ; If we add 1 to the constant, it will wrap around, and so it's not true
-    ; that
-    ;
-    ; x sle c => x slt c + 1
-    ; x sgt c => x sge c + 1
-
-
-    ; CHECK-LABEL: name: no_opt_int64_max
-    ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 9223372036854775807
-    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
-    %0:gpr(s64) = COPY $x0
-    %1:gpr(s64) = G_CONSTANT i64 9223372036854775807
-    %4:gpr(s32) = G_ICMP intpred(sle), %0(s64), %1
-    %6:gpr(s64) = G_ANYEXT %4(s32)
-    %5:gpr(s64) = G_CONSTANT i64 1
-    %3:gpr(s64) = G_AND %6, %5
-    $x0 = COPY %3(s64)
-    RET_ReallyLR implicit $x0
-
-...
----
-name:            no_opt_zero
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0
-
-    ; This one should contain a MOV.
-    ;
-    ; This is an unsigned comparison, so when the constant is 0, the following
-    ; does not hold:
-    ;
-    ; x ult c => x ule c - 1
-    ; x uge c => x ugt c - 1
-
-    ; CHECK-LABEL: name: no_opt_zero
-    ; CHECK: liveins: $x0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri [[COPY]], 0, 0, implicit-def $nzcv
-    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
-    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
-    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
-    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
-    ; CHECK: $x0 = COPY [[ANDXri]]
-    ; CHECK: RET_ReallyLR implicit $x0
-    %0:gpr(s64) = COPY $x0
-    %1:gpr(s64) = G_CONSTANT i64 0
-    %4:gpr(s32) = G_ICMP intpred(ult), %0(s64), %1
-    %6:gpr(s64) = G_ANYEXT %4(s32)
-    %5:gpr(s64) = G_CONSTANT i64 1
-    %3:gpr(s64) = G_AND %6, %5
-    $x0 = COPY %3(s64)
-    RET_ReallyLR implicit $x0
-
-...
----
-name:            more_than_one_use_select
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0, $x1, $x2
-
-    ; Both of these selects use the same compare.
-    ;
-    ; They should both be optimized in the same way, so the SUBS produced for
-    ; each CSEL should be the same.
-
-    ; CHECK-LABEL: name: more_than_one_use_select
-    ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %a:gpr64common = COPY $x0
-    ; CHECK: %b:gpr64 = COPY $x1
-    ; CHECK: %c:gpr64 = COPY $x2
-    ; CHECK: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %a, 0, 0, implicit-def $nzcv
-    ; CHECK: %select1:gpr64 = CSELXr %a, %b, 11, implicit $nzcv
-    ; CHECK: [[SUBSXri1:%[0-9]+]]:gpr64 = SUBSXri %a, 0, 0, implicit-def $nzcv
-    ; CHECK: %select2:gpr64 = CSELXr %b, %c, 11, implicit $nzcv
-    ; CHECK: %add:gpr64 = ADDXrr %select1, %select2
-    ; CHECK: $x0 = COPY %add
-    ; CHECK: RET_ReallyLR implicit $x0
-    %a:gpr(s64) = COPY $x0
-    %b:gpr(s64) = COPY $x1
-    %c:gpr(s64) = COPY $x2
-    %cst:gpr(s64) = G_CONSTANT i64 -1
-    %cmp:gpr(s32) = G_ICMP intpred(sle), %a(s64), %cst
-    %trunc_cmp:gpr(s1) = G_TRUNC %cmp(s32)
-    %select1:gpr(s64) = G_SELECT %trunc_cmp(s1), %a, %b
-    %select2:gpr(s64) = G_SELECT %trunc_cmp(s1), %b, %c
-    %add:gpr(s64) = G_ADD %select1, %select2
-    $x0 = COPY %add(s64)
-    RET_ReallyLR implicit $x0
-...
----
-name:            more_than_one_use_select_no_opt
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $x0, $x1, $x2
-
-    ; When we don't end up doing the optimization, we should not change the
-    ; predicate.
-    ;
-    ; In this case, the CSELXrs should both have predicate code 13.
-
-    ; CHECK-LABEL: name: more_than_one_use_select_no_opt
-    ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: %a:gpr64 = COPY $x0
-    ; CHECK: %b:gpr64 = COPY $x1
-    ; CHECK: %c:gpr64 = COPY $x2
-    ; CHECK: %cst:gpr64 = MOVi64imm 922337203685477580
-    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %a, %cst, implicit-def $nzcv
-    ; CHECK: %select1:gpr64 = CSELXr %a, %b, 13, implicit $nzcv
-    ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr %a, %cst, implicit-def $nzcv
-    ; CHECK: %select2:gpr64 = CSELXr %b, %c, 13, implicit $nzcv
-    ; CHECK: %add:gpr64 = ADDXrr %select1, %select2
-    ; CHECK: $x0 = COPY %add
-    ; CHECK: RET_ReallyLR implicit $x0
-    %a:gpr(s64) = COPY $x0
-    %b:gpr(s64) = COPY $x1
-    %c:gpr(s64) = COPY $x2
-    %cst:gpr(s64) = G_CONSTANT i64 922337203685477580
-    %cmp:gpr(s32) = G_ICMP intpred(sle), %a(s64), %cst
-    %trunc_cmp:gpr(s1) = G_TRUNC %cmp(s32)
-    %select1:gpr(s64) = G_SELECT %trunc_cmp(s1), %a, %b
-    %select2:gpr(s64) = G_SELECT %trunc_cmp(s1), %b, %c
-    %add:gpr(s64) = G_ADD %select1, %select2
-    $x0 = COPY %add(s64)
-    RET_ReallyLR implicit $x0
-...
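For readers skimming the deleted tests above: the transformation they exercise can be sketched as a small standalone function. The sketch below is reconstructed from the test comments only, so treat it as an illustration; adjustICmpImmAndPred, isLegalArithImmed, and the Pred enum here are invented stand-ins, not the in-tree API.

#include <cstdint>
#include <optional>
#include <utility>

enum class Pred { ULT, ULE, UGT, UGE, SLT, SLE, SGT, SGE };

// AArch64 arithmetic immediates are 12 bits, optionally shifted left by 12:
// 4096 encodes as 1 << 12 and 8192 as 2 << 12, but 4097 and 8191 do not fit.
constexpr bool isLegalArithImmed(uint64_t C) {
  return (C >> 12) == 0 || (((C & 0xfffULL) == 0) && (C >> 24) == 0);
}

// x ult c => x ule (c - 1)     x uge c => x ugt (c - 1)
// x slt c => x sle (c - 1)     x sge c => x sgt (c - 1)
// x sle c => x slt (c + 1)     x sgt c => x sge (c + 1)
// Returns the adjusted (constant, predicate), or nullopt when the constant
// is already encodable or the adjustment would wrap: c == 0 for the unsigned
// subtract-1 forms, INT_MIN / INT_MAX for the signed forms.
std::optional<std::pair<uint64_t, Pred>>
adjustICmpImmAndPred(uint64_t C, Pred P, unsigned Bits /* 32 or 64 */) {
  const uint64_t Mask = Bits == 64 ? ~0ULL : 0xffffffffULL;
  const uint64_t SMin = Bits == 64 ? 1ULL << 63 : 1ULL << 31;
  const uint64_t SMax = SMin - 1;
  C &= Mask;
  if (isLegalArithImmed(C))
    return std::nullopt; // Already encodable; no MOV to eliminate.
  uint64_t NewC = C;
  Pred NewP = P;
  switch (P) {
  case Pred::ULT: case Pred::UGE: // subtract 1; wraps when c == 0
    if (C == 0)
      return std::nullopt;
    NewC = (C - 1) & Mask;
    NewP = P == Pred::ULT ? Pred::ULE : Pred::UGT;
    break;
  case Pred::SLT: case Pred::SGE: // subtract 1; wraps at INT_MIN
    if (C == SMin)
      return std::nullopt;
    NewC = (C - 1) & Mask;
    NewP = P == Pred::SLT ? Pred::SLE : Pred::SGT;
    break;
  case Pred::SLE: case Pred::SGT: // add 1; wraps at INT_MAX
    if (C == SMax)
      return std::nullopt;
    NewC = (C + 1) & Mask;
    NewP = P == Pred::SLE ? Pred::SLT : Pred::SGE;
    break;
  default: // ule/ugt (add 1) are not covered by the tests above; left out
    return std::nullopt;
  }
  if (!isLegalArithImmed(NewC))
    return std::nullopt; // Still not encodable; keep the MOV and SUBS(rr).
  return std::make_pair(NewC, NewP);
}

For example, (4097, ULT) becomes (4096, ULE) and (8191, SGT) becomes (8192, SGE), matching the shifted SUBSWri/SUBSXri immediates in the CHECK lines above, while (0, ULT), (INT_MIN, SLT), and (INT_MAX, SLE) are rejected, matching the no_opt_* tests.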

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
index 68d515b529e4..2cb6e5a253c2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
@@ -184,35 +184,4 @@ body:             |
   bb.3:
     RET_ReallyLR
 
-...
----
-name:            update_pred_minus_one
-legalized:       true
-regBankSelected: true
 
-body:             |
-  ; CHECK-LABEL: name: update_pred_minus_one
-  ; CHECK: bb.0:
-  ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
-  ; CHECK:   [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
-  ; CHECK:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 0, 0, implicit-def $nzcv
-  ; CHECK:   Bcc 11, %bb.1, implicit $nzcv
-  ; CHECK:   B %bb.0
-  ; CHECK: bb.1:
-  ; The G_ICMP here will be optimized into a slt against 0.
-  ; The branch should inherit this change, so we should have Bcc 11 rather than
-  ; Bcc 13.
-
-  bb.0:
-    liveins: $w0
-    successors: %bb.0, %bb.1
-
-    %0:gpr(s32) = COPY $w0
-    %1:gpr(s32) = G_CONSTANT i32 -1
-    %2:gpr(s32) = G_ICMP intpred(sle), %0, %1
-    %3:gpr(s1) = G_TRUNC %2(s32)
-    G_BRCOND %3(s1), %bb.1
-    G_BR %bb.0
-
-  bb.1:
-...
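The removed update_pred_minus_one test above is the branch-flavored instance of the same rule: x sle -1 becomes x slt 0, so no MOV is needed for the constant and the Bcc inherits condition code 11 (LT) rather than 13 (LE). In terms of the earlier sketch, adjustICmpImmAndPred(~0ULL, Pred::SLE, 32) would return {0, Pred::SLT}.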

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir
index 249807a285f0..2e4f88fe4d45 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir
@@ -99,9 +99,10 @@ body:             |
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
   ; CHECK:   %copy:gpr64 = COPY $x0
-  ; CHECK:   %and:gpr64sp = ANDXri %copy, 8000
-  ; CHECK:   [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %and, 0, 0, implicit-def $nzcv
-  ; CHECK:   Bcc 10, %bb.1, implicit $nzcv
+  ; CHECK:   %negative_one:gpr64 = MOVi64imm -1
+  ; CHECK:   %and:gpr64common = ANDXri %copy, 8000
+  ; CHECK:   [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %and, %negative_one, implicit-def $nzcv
+  ; CHECK:   Bcc 12, %bb.1, implicit $nzcv
   ; CHECK:   B %bb.0
   ; CHECK: bb.1:
   ; CHECK:   RET_ReallyLR


        

