[llvm] [AArch64] Support MI and PL (PR #150314)

via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 4 13:33:15 PDT 2025


https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/150314

>From 83fc0bcbbcd297e575ed93e44100ceb97cefdc55 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Wed, 23 Jul 2025 15:59:11 -0400
Subject: [PATCH] [AArch64] Support MI and PL

Now, why would we want to do this?

There are a small number of places where this helps:
1. It helps the peephole optimizer by requiring less flag checking.
2. It allows comparisons such as x - 0x80000000 < 0 to be folded into a cmp of x against a register holding that value.
3. We can refine the other passes over time to take advantage of this.
---
 .../Target/AArch64/AArch64ISelLowering.cpp    |  23 +--
 .../GISel/AArch64InstructionSelector.cpp      |  26 ++-
 .../16bit-float-promotion-with-nofp.ll        |   8 +-
 .../AArch64/GlobalISel/opt-and-tbnz-tbz.mir   |   2 +-
 ...postlegalizer-lowering-adjust-icmp-imm.mir |   4 +-
 .../GlobalISel/select-tbnz-from-cmp.mir       |   2 +-
 llvm/test/CodeGen/AArch64/arm64-ccmp.ll       |  14 +-
 llvm/test/CodeGen/AArch64/arm64-fmax.ll       |   2 +-
 llvm/test/CodeGen/AArch64/arm64-fp128.ll      |   4 +-
 llvm/test/CodeGen/AArch64/arm64-vabs.ll       |   4 +-
 .../check-sign-bit-before-extension.ll        |   8 +-
 llvm/test/CodeGen/AArch64/combine-sdiv.ll     |   8 +-
 llvm/test/CodeGen/AArch64/csel-cmp-cse.ll     |   2 +-
 llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll   |  52 +++++-
 llvm/test/CodeGen/AArch64/fcmp-fp128.ll       |  12 +-
 llvm/test/CodeGen/AArch64/fcmp.ll             |  82 ++++-----
 llvm/test/CodeGen/AArch64/fp-intrinsics.ll    |  16 +-
 llvm/test/CodeGen/AArch64/fpclamptosat.ll     |   6 +-
 llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll |  12 +-
 .../test/CodeGen/AArch64/fptosi-sat-scalar.ll |  30 ++--
 .../test/CodeGen/AArch64/fptosi-sat-vector.ll |  76 ++++-----
 .../test/CodeGen/AArch64/fptoui-sat-scalar.ll |   6 +-
 .../test/CodeGen/AArch64/fptoui-sat-vector.ll |  72 ++++----
 .../CodeGen/AArch64/logical_shifted_reg.ll    |   4 +-
 llvm/test/CodeGen/AArch64/min-max-combine.ll  |   8 +-
 llvm/test/CodeGen/AArch64/pr72777.ll          |  17 +-
 llvm/test/CodeGen/AArch64/sdivpow2.ll         | 159 ++++++++++++------
 .../CodeGen/AArch64/select-constant-xor.ll    |  34 ++--
 .../CodeGen/AArch64/selectcc-to-shiftand.ll   |  12 +-
 llvm/test/CodeGen/AArch64/signbit-shift.ll    |   4 +-
 llvm/test/CodeGen/AArch64/smul_fix_sat.ll     |  18 +-
 llvm/test/CodeGen/AArch64/srem-pow2.ll        |   4 +-
 llvm/test/CodeGen/AArch64/sshl_sat.ll         |   2 +-
 llvm/test/CodeGen/AArch64/stack-hazard.ll     |  12 +-
 llvm/test/CodeGen/AArch64/tbz-tbnz.ll         |  14 +-
 llvm/test/CodeGen/AArch64/vecreduce-bool.ll   |  24 +--
 .../AArch64/vecreduce-fmin-legalization.ll    |   2 +-
 37 files changed, 450 insertions(+), 335 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2b6ea86ee1af5..deaa1ab6df9e9 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -3320,7 +3320,8 @@ static bool isZerosVector(const SDNode *N) {
 
 /// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64
 /// CC
-static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) {
+static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC, SDValue RHS,
+                                                  bool canUsePLOrMI = true) {
   switch (CC) {
   default:
     llvm_unreachable("Unknown condition code!");
@@ -3331,9 +3332,9 @@ static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) {
   case ISD::SETGT:
     return AArch64CC::GT;
   case ISD::SETGE:
-    return AArch64CC::GE;
+    return canUsePLOrMI && isNullConstant(RHS) ? AArch64CC::PL : AArch64CC::GE;
   case ISD::SETLT:
-    return AArch64CC::LT;
+    return canUsePLOrMI && isNullConstant(RHS) ? AArch64CC::MI : AArch64CC::LT;
   case ISD::SETLE:
     return AArch64CC::LE;
   case ISD::SETUGT:
@@ -3782,7 +3783,7 @@ static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val,
     SDLoc DL(Val);
     // Determine OutCC and handle FP special case.
     if (isInteger) {
-      OutCC = changeIntCCToAArch64CC(CC);
+      OutCC = changeIntCCToAArch64CC(CC, RHS);
     } else {
       assert(LHS.getValueType().isFloatingPoint());
       AArch64CC::CondCode ExtraCC;
@@ -4065,7 +4066,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
         Cmp = emitComparison(
             SExt, DAG.getSignedConstant(ValueofRHS, DL, RHS.getValueType()), CC,
             DL, DAG);
-        AArch64CC = changeIntCCToAArch64CC(CC);
+        AArch64CC = changeIntCCToAArch64CC(CC, RHS);
       }
     }
 
@@ -4079,7 +4080,7 @@ static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
 
   if (!Cmp) {
     Cmp = emitComparison(LHS, RHS, CC, DL, DAG);
-    AArch64CC = changeIntCCToAArch64CC(CC);
+    AArch64CC = changeIntCCToAArch64CC(CC, RHS);
   }
   AArch64cc = getCondCode(DAG, AArch64CC);
   return Cmp;
@@ -10650,10 +10651,10 @@ SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
     // Try to emit Armv9.6 CB instructions. We prefer tb{n}z/cb{n}z due to their
     // larger branch displacement but do prefer CB over cmp + br.
     if (Subtarget->hasCMPBR() &&
-        AArch64CC::isValidCBCond(changeIntCCToAArch64CC(CC)) &&
+        AArch64CC::isValidCBCond(changeIntCCToAArch64CC(CC, RHS, false)) &&
         ProduceNonFlagSettingCondBr) {
       SDValue Cond =
-          DAG.getTargetConstant(changeIntCCToAArch64CC(CC), DL, CondCodeVT);
+          DAG.getTargetConstant(changeIntCCToAArch64CC(CC, RHS, false), DL, CondCodeVT);
       return DAG.getNode(AArch64ISD::CB, DL, MVT::Other, Chain, Cond, LHS, RHS,
                          Dest);
     }
@@ -11210,7 +11211,7 @@ SDValue AArch64TargetLowering::LowerSETCCCARRY(SDValue Op,
 
   ISD::CondCode Cond = cast<CondCodeSDNode>(Op.getOperand(3))->get();
   ISD::CondCode CondInv = ISD::getSetCCInverse(Cond, VT);
-  SDValue CCVal = getCondCode(DAG, changeIntCCToAArch64CC(CondInv));
+  SDValue CCVal = getCondCode(DAG, changeIntCCToAArch64CC(CondInv, RHS, false));
   // Inputs are swapped because the condition is inverted. This will allow
   // matching with a single CSINC instruction.
   return DAG.getNode(AArch64ISD::CSEL, DL, OpVT, FVal, TVal, CCVal,
@@ -11492,7 +11493,7 @@ SDValue AArch64TargetLowering::LowerSELECT_CC(
     ConstantSDNode *RHSVal = dyn_cast<ConstantSDNode>(RHS);
     if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() &&
         !RHSVal->isZero() && !RHSVal->isAllOnes()) {
-      AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
+      AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC, RHS);
       // Transform "a == C ? C : x" to "a == C ? a : x" and "a != C ? x : C" to
       // "a != C ? x : a" to avoid materializing C.
       if (CTVal && CTVal == RHSVal && AArch64CC == AArch64CC::EQ)
@@ -11503,7 +11504,7 @@ SDValue AArch64TargetLowering::LowerSELECT_CC(
       assert (CTVal && CFVal && "Expected constant operands for CSNEG.");
       // Use a CSINV to transform "a == C ? 1 : -1" to "a == C ? a : -1" to
       // avoid materializing C.
-      AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
+      AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC, RHS);
       if (CTVal == RHSVal && AArch64CC == AArch64CC::EQ) {
         Opcode = AArch64ISD::CSINV;
         TVal = LHS;
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index d9056926ff249..4c289669e4213 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -1349,7 +1349,10 @@ AArch64InstructionSelector::emitSelect(Register Dst, Register True,
   return &*SelectInst;
 }
 
-static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
+static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P,
+                                                     Register RHS,
+                                                     MachineRegisterInfo *MRI,
+                                                     bool canUsePLOrMI = true) {
   switch (P) {
   default:
     llvm_unreachable("Unknown condition code!");
@@ -1360,8 +1363,18 @@ static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
   case CmpInst::ICMP_SGT:
     return AArch64CC::GT;
   case CmpInst::ICMP_SGE:
+    if (canUsePLOrMI) {
+      auto ValAndVReg = getIConstantVRegValWithLookThrough(RHS, *MRI);
+      if (ValAndVReg && ValAndVReg->Value == 0)
+        return AArch64CC::PL;
+    }
     return AArch64CC::GE;
   case CmpInst::ICMP_SLT:
+    if (canUsePLOrMI) {
+      auto ValAndVReg = getIConstantVRegValWithLookThrough(RHS, *MRI);
+      if (ValAndVReg && ValAndVReg->Value == 0)
+        return AArch64CC::MI;
+    }
     return AArch64CC::LT;
   case CmpInst::ICMP_SLE:
     return AArch64CC::LE;
@@ -1813,7 +1826,8 @@ bool AArch64InstructionSelector::selectCompareBranchFedByICmp(
   auto &PredOp = ICmp.getOperand(1);
   emitIntegerCompare(ICmp.getOperand(2), ICmp.getOperand(3), PredOp, MIB);
   const AArch64CC::CondCode CC = changeICMPPredToAArch64CC(
-      static_cast<CmpInst::Predicate>(PredOp.getPredicate()));
+      static_cast<CmpInst::Predicate>(PredOp.getPredicate()),
+      ICmp.getOperand(3).getReg(), MIB.getMRI());
   MIB.buildInstr(AArch64::Bcc, {}, {}).addImm(CC).addMBB(DestMBB);
   I.eraseFromParent();
   return true;
@@ -2511,7 +2525,7 @@ bool AArch64InstructionSelector::earlySelect(MachineInstr &I) {
                        /*RHS=*/Cmp->getOperand(3), PredOp, MIB);
     auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
     const AArch64CC::CondCode InvCC =
-        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
+        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred), Cmp->getOperand(3).getReg(), &MRI);
     emitCSINC(/*Dst=*/AddDst, /*Src =*/AddLHS, /*Src2=*/AddLHS, InvCC, MIB);
     I.eraseFromParent();
     return true;
@@ -3578,7 +3592,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
     emitIntegerCompare(I.getOperand(2), I.getOperand(3), PredOp, MIB);
     auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
     const AArch64CC::CondCode InvCC =
-        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred));
+        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(Pred), I.getOperand(3).getReg(), &MRI);
     emitCSINC(/*Dst=*/I.getOperand(0).getReg(), /*Src1=*/AArch64::WZR,
               /*Src2=*/AArch64::WZR, InvCC, MIB);
     I.eraseFromParent();
@@ -4931,7 +4945,7 @@ MachineInstr *AArch64InstructionSelector::emitConjunctionRec(
     if (Negate)
       CC = CmpInst::getInversePredicate(CC);
     if (isa<GICmp>(Cmp)) {
-      OutCC = changeICMPPredToAArch64CC(CC);
+      OutCC = changeICMPPredToAArch64CC(CC, RHS, MIB.getMRI());
     } else {
       // Handle special FP cases.
       AArch64CC::CondCode ExtraCC;
@@ -5101,7 +5115,7 @@ bool AArch64InstructionSelector::tryOptSelect(GSelect &I) {
     emitIntegerCompare(CondDef->getOperand(2), CondDef->getOperand(3), PredOp,
                        MIB);
     auto Pred = static_cast<CmpInst::Predicate>(PredOp.getPredicate());
-    CondCode = changeICMPPredToAArch64CC(Pred);
+    CondCode = changeICMPPredToAArch64CC(Pred, CondDef->getOperand(3).getReg(), &MRI);
   } else {
     // Get the condition code for the select.
     auto Pred =
diff --git a/llvm/test/CodeGen/AArch64/16bit-float-promotion-with-nofp.ll b/llvm/test/CodeGen/AArch64/16bit-float-promotion-with-nofp.ll
index 5d4f9204e7063..c9560e705280b 100644
--- a/llvm/test/CodeGen/AArch64/16bit-float-promotion-with-nofp.ll
+++ b/llvm/test/CodeGen/AArch64/16bit-float-promotion-with-nofp.ll
@@ -77,7 +77,7 @@ define double @selectcc_f64(double %a, double %b, i32 %d) {
 ; CHECK-LABEL: selectcc_f64:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmp w2, #0
-; CHECK-NEXT:    csel x0, x0, x1, lt
+; CHECK-NEXT:    csel x0, x0, x1, mi
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp slt i32 %d, 0
@@ -89,7 +89,7 @@ define float @selectcc_f32(float %a, float %b, i32 %d) {
 ; CHECK-LABEL: selectcc_f32:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmp w2, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp slt i32 %d, 0
@@ -101,7 +101,7 @@ define half @selectcc_f16(half %a, half %b, i32 %d) {
 ; CHECK-LABEL: selectcc_f16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmp w2, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp slt i32 %d, 0
@@ -113,7 +113,7 @@ define bfloat @selectcc_bf16(bfloat %a, bfloat %b, i32 %d) {
 ; CHECK-LABEL: selectcc_bf16:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmp w2, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
 entry:
   %c = icmp slt i32 %d, 0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-and-tbnz-tbz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-and-tbnz-tbz.mir
index 95ae12f6d59db..a5b6ea487aac4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-and-tbnz-tbz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-and-tbnz-tbz.mir
@@ -149,7 +149,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr32 = COPY $w0
   ; CHECK-NEXT:   [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
-  ; CHECK-NEXT:   Bcc 11, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   Bcc 4, %bb.1, implicit $nzcv
   ; CHECK-NEXT:   B %bb.0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
index edc33e340c9b6..3b991c3d910d5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
@@ -661,7 +661,7 @@ body:             |
     ; SELECT-NEXT: %reg0:gpr32common = COPY $w0
     ; SELECT-NEXT: %reg1:gpr32 = COPY $w1
     ; SELECT-NEXT: [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri %reg0, 0, 0, implicit-def $nzcv
-    ; SELECT-NEXT: %select:gpr32 = CSELWr %reg0, %reg1, 11, implicit $nzcv
+    ; SELECT-NEXT: %select:gpr32 = CSELWr %reg0, %reg1, 4, implicit $nzcv
     ; SELECT-NEXT: $w0 = COPY %select
     ; SELECT-NEXT: RET_ReallyLR implicit $w0
     %reg0:_(s32) = COPY $w0
@@ -699,7 +699,7 @@ body:             |
     ; SELECT-NEXT: {{  $}}
     ; SELECT-NEXT: %reg0:gpr64 = COPY $x0
     ; SELECT-NEXT: [[ANDSXri:%[0-9]+]]:gpr64 = ANDSXri %reg0, 8000, implicit-def $nzcv
-    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
+    ; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 4, implicit $nzcv
     ; SELECT-NEXT: $w0 = COPY %cmp
     ; SELECT-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s64) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-tbnz-from-cmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-tbnz-from-cmp.mir
index 30db00aa97813..67262c27e2059 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-tbnz-from-cmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-tbnz-from-cmp.mir
@@ -166,7 +166,7 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %copy:gpr64 = COPY $x0
   ; CHECK-NEXT:   [[ANDSXri:%[0-9]+]]:gpr64 = ANDSXri %copy, 8000, implicit-def $nzcv
-  ; CHECK-NEXT:   Bcc 11, %bb.1, implicit $nzcv
+  ; CHECK-NEXT:   Bcc 4, %bb.1, implicit $nzcv
   ; CHECK-NEXT:   B %bb.0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
diff --git a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
index a546ffd7143ad..4fe01e838771d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -600,7 +600,7 @@ define i64 @select_noccmp1(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
 ; CHECK-SD-LABEL: select_noccmp1:
 ; CHECK-SD:       ; %bb.0:
 ; CHECK-SD-NEXT:    cmp x0, #0
-; CHECK-SD-NEXT:    ccmp x0, #13, #4, lt
+; CHECK-SD-NEXT:    ccmp x0, #13, #4, mi
 ; CHECK-SD-NEXT:    cset w8, gt
 ; CHECK-SD-NEXT:    cmp x2, #2
 ; CHECK-SD-NEXT:    ccmp x2, #4, #4, lt
@@ -630,7 +630,7 @@ define i64 @select_noccmp2(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
 ; CHECK-SD-LABEL: select_noccmp2:
 ; CHECK-SD:       ; %bb.0:
 ; CHECK-SD-NEXT:    cmp x0, #0
-; CHECK-SD-NEXT:    ccmp x0, #13, #0, ge
+; CHECK-SD-NEXT:    ccmp x0, #13, #0, pl
 ; CHECK-SD-NEXT:    cset w8, gt
 ; CHECK-SD-NEXT:    cmp w8, #0
 ; CHECK-SD-NEXT:    csel x0, xzr, x3, ne
@@ -664,7 +664,7 @@ define i32 @select_noccmp3(i32 %v0, i32 %v1, i32 %v2) {
 ; CHECK-SD-LABEL: select_noccmp3:
 ; CHECK-SD:       ; %bb.0:
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    ccmp w0, #13, #0, ge
+; CHECK-SD-NEXT:    ccmp w0, #13, #0, pl
 ; CHECK-SD-NEXT:    cset w8, gt
 ; CHECK-SD-NEXT:    cmp w0, #22
 ; CHECK-SD-NEXT:    mov w9, #44 ; =0x2c
@@ -937,11 +937,11 @@ define i32 @f128_select_and_olt_oge(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3,
 ; CHECK-SD-NEXT:    stp q2, q3, [sp] ; 32-byte Folded Spill
 ; CHECK-SD-NEXT:    bl ___lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    cset w21, lt
+; CHECK-SD-NEXT:    cset w21, mi
 ; CHECK-SD-NEXT:    ldp q0, q1, [sp] ; 32-byte Folded Reload
 ; CHECK-SD-NEXT:    bl ___getf2
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    cset w8, ge
+; CHECK-SD-NEXT:    cset w8, pl
 ; CHECK-SD-NEXT:    tst w8, w21
 ; CHECK-SD-NEXT:    csel w0, w20, w19, ne
 ; CHECK-SD-NEXT:    ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
@@ -964,8 +964,8 @@ define i32 @f128_select_and_olt_oge(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3,
 ; CHECK-GI-NEXT:    ldp q1, q0, [sp] ; 32-byte Folded Reload
 ; CHECK-GI-NEXT:    bl ___getf2
 ; CHECK-GI-NEXT:    cmp w21, #0
-; CHECK-GI-NEXT:    ccmp w0, #0, #8, lt
-; CHECK-GI-NEXT:    csel w0, w19, w20, ge
+; CHECK-GI-NEXT:    ccmp w0, #0, #8, mi
+; CHECK-GI-NEXT:    csel w0, w19, w20, pl
 ; CHECK-GI-NEXT:    ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/arm64-fmax.ll b/llvm/test/CodeGen/AArch64/arm64-fmax.ll
index d7d54a6e48a92..85104775339b6 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fmax.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fmax.ll
@@ -60,7 +60,7 @@ define i64 @test_integer(i64  %in) {
 ; CHECK-LABEL: test_integer:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    cmp x0, #0
-; CHECK-NEXT:    csel x0, xzr, x0, lt
+; CHECK-NEXT:    csel x0, xzr, x0, mi
 ; CHECK-NEXT:    ret
   %cmp = icmp slt i64 %in, 0
   %val = select i1 %cmp, i64 0, i64 %in
diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
index a75f6419d5a5a..3e4b887fed55d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
@@ -258,7 +258,7 @@ define i32 @test_br_cc(fp128 %lhs, fp128 %rhs) {
 ; CHECK-SD-NEXT:    mov w8, #29 // =0x1d
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    mov w9, #42 // =0x2a
-; CHECK-SD-NEXT:    csel w0, w9, w8, lt
+; CHECK-SD-NEXT:    csel w0, w9, w8, mi
 ; CHECK-SD-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-SD-NEXT:    ret
 ;
@@ -271,7 +271,7 @@ define i32 @test_br_cc(fp128 %lhs, fp128 %rhs) {
 ; CHECK-GI-NEXT:    mov w8, #29 // =0x1d
 ; CHECK-GI-NEXT:    mov w9, #42 // =0x2a
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w9, w8, lt
+; CHECK-GI-NEXT:    csel w0, w9, w8, mi
 ; CHECK-GI-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-GI-NEXT:    ret
   %cond = fcmp olt fp128 %lhs, %rhs
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index 78881c80ccc10..00e9a1baec727 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -1830,10 +1830,10 @@ define <2 x i128> @uabd_i64(<2 x i64> %a, <2 x i64> %b) {
 ; CHECK-GI-NEXT:    subs x10, x11, x13
 ; CHECK-GI-NEXT:    sbc x11, x14, x15
 ; CHECK-GI-NEXT:    cmp x9, #0
-; CHECK-GI-NEXT:    cset w12, lt
+; CHECK-GI-NEXT:    cset w12, mi
 ; CHECK-GI-NEXT:    csel w12, wzr, w12, eq
 ; CHECK-GI-NEXT:    cmp x11, #0
-; CHECK-GI-NEXT:    cset w13, lt
+; CHECK-GI-NEXT:    cset w13, mi
 ; CHECK-GI-NEXT:    csel w13, wzr, w13, eq
 ; CHECK-GI-NEXT:    negs x14, x8
 ; CHECK-GI-NEXT:    ngc x15, x9
diff --git a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
index 1d60929f2b94c..0960c4c2a3342 100644
--- a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
+++ b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
@@ -80,7 +80,7 @@ define i32 @g_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sxtb w8, w0
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w8, w1, w2, lt
+; CHECK-NEXT:    csel w8, w1, w2, mi
 ; CHECK-NEXT:    add w0, w8, w0, uxtb
 ; CHECK-NEXT:    ret
 entry:
@@ -102,7 +102,7 @@ define i32 @g_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind {
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    sxth w8, w0
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w8, w1, w2, lt
+; CHECK-NEXT:    csel w8, w1, w2, mi
 ; CHECK-NEXT:    add w0, w8, w0, uxth
 ; CHECK-NEXT:    ret
 entry:
@@ -123,7 +123,7 @@ define i64 @g_i32_sign_extend_inreg(i32 %in, i64 %a, i64 %b) nounwind {
 ; CHECK-LABEL: g_i32_sign_extend_inreg:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    csel x8, x1, x2, lt
+; CHECK-NEXT:    csel x8, x1, x2, mi
 ; CHECK-NEXT:    add x0, x8, w0, uxtw
 ; CHECK-NEXT:    ret
 entry:
@@ -170,7 +170,7 @@ define i64 @g_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind {
 ; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
 ; CHECK-NEXT:    sxtw x8, w0
 ; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    csel x8, x1, x2, lt
+; CHECK-NEXT:    csel x8, x1, x2, mi
 ; CHECK-NEXT:    add x0, x8, w0, uxtw
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/combine-sdiv.ll b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
index e1ba0e98a6c01..6208a697cab11 100644
--- a/llvm/test/CodeGen/AArch64/combine-sdiv.ll
+++ b/llvm/test/CodeGen/AArch64/combine-sdiv.ll
@@ -1464,7 +1464,7 @@ define i32 @combine_i32_sdiv_pow2(i32 %x) {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    add w8, w0, #15
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    csel w8, w8, w0, lt
+; CHECK-SD-NEXT:    csel w8, w8, w0, mi
 ; CHECK-SD-NEXT:    asr w0, w8, #4
 ; CHECK-SD-NEXT:    ret
 ;
@@ -1483,7 +1483,7 @@ define i32 @combine_i32_sdiv_negpow2(i32 %x) {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    add w8, w0, #255
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    csel w8, w8, w0, lt
+; CHECK-SD-NEXT:    csel w8, w8, w0, mi
 ; CHECK-SD-NEXT:    neg w0, w8, asr #8
 ; CHECK-SD-NEXT:    ret
 ;
@@ -1502,7 +1502,7 @@ define i64 @combine_i64_sdiv_pow2(i64 %x) {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    add x8, x0, #15
 ; CHECK-SD-NEXT:    cmp x0, #0
-; CHECK-SD-NEXT:    csel x8, x8, x0, lt
+; CHECK-SD-NEXT:    csel x8, x8, x0, mi
 ; CHECK-SD-NEXT:    asr x0, x8, #4
 ; CHECK-SD-NEXT:    ret
 ;
@@ -1521,7 +1521,7 @@ define i64 @combine_i64_sdiv_negpow2(i64 %x) {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    add x8, x0, #255
 ; CHECK-SD-NEXT:    cmp x0, #0
-; CHECK-SD-NEXT:    csel x8, x8, x0, lt
+; CHECK-SD-NEXT:    csel x8, x8, x0, mi
 ; CHECK-SD-NEXT:    neg x0, x8, asr #8
 ; CHECK-SD-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/AArch64/csel-cmp-cse.ll b/llvm/test/CodeGen/AArch64/csel-cmp-cse.ll
index e745326323329..07b32ef0830c2 100644
--- a/llvm/test/CodeGen/AArch64/csel-cmp-cse.ll
+++ b/llvm/test/CodeGen/AArch64/csel-cmp-cse.ll
@@ -706,7 +706,7 @@ define i32 @test_ugtsmax_sub_add_i32(i32 %x0, i32 %x1) {
 ; CHECK-NEXT:    add w9, w0, w1
 ; CHECK-NEXT:    cmp w1, #0
 ; CHECK-NEXT:    add w8, w9, w8
-; CHECK-NEXT:    csel w0, wzr, w8, lt
+; CHECK-NEXT:    csel w0, wzr, w8, mi
 ; CHECK-NEXT:    ret
   %cmp = icmp ugt i32 %x1, 2147483647
   %add = add i32 %x0, %x1
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll b/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll
index 539fe7e7d3c83..7b6780d0e1e1f 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-sdiv.ll
@@ -1,12 +1,17 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-linux-gnu                               -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK
-; RUN: llc -mtriple=aarch64-linux-gnu -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK
+; RUN: llc -mtriple=aarch64-linux-gnu -fast-isel -fast-isel-abort=1 -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-FAST-ISEL
 
 define i32 @sdiv_i32_exact(i32 %a) {
 ; CHECK-LABEL: sdiv_i32_exact:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr w0, w0, #3
 ; CHECK-NEXT:    ret
+;
+; CHECK-FAST-ISEL-LABEL: sdiv_i32_exact:
+; CHECK-FAST-ISEL:       // %bb.0:
+; CHECK-FAST-ISEL-NEXT:    asr w0, w0, #3
+; CHECK-FAST-ISEL-NEXT:    ret
   %1 = sdiv exact i32 %a, 8
   ret i32 %1
 }
@@ -16,9 +21,17 @@ define i32 @sdiv_i32_pos(i32 %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, #7
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    csel w8, w8, w0, lt
+; CHECK-NEXT:    csel w8, w8, w0, mi
 ; CHECK-NEXT:    asr w0, w8, #3
 ; CHECK-NEXT:    ret
+;
+; CHECK-FAST-ISEL-LABEL: sdiv_i32_pos:
+; CHECK-FAST-ISEL:       // %bb.0:
+; CHECK-FAST-ISEL-NEXT:    add w8, w0, #7
+; CHECK-FAST-ISEL-NEXT:    cmp w0, #0
+; CHECK-FAST-ISEL-NEXT:    csel w8, w8, w0, lt
+; CHECK-FAST-ISEL-NEXT:    asr w0, w8, #3
+; CHECK-FAST-ISEL-NEXT:    ret
   %1 = sdiv i32 %a, 8
   ret i32 %1
 }
@@ -28,9 +41,17 @@ define i32 @sdiv_i32_neg(i32 %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add w8, w0, #7
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    csel w8, w8, w0, lt
+; CHECK-NEXT:    csel w8, w8, w0, mi
 ; CHECK-NEXT:    neg w0, w8, asr #3
 ; CHECK-NEXT:    ret
+;
+; CHECK-FAST-ISEL-LABEL: sdiv_i32_neg:
+; CHECK-FAST-ISEL:       // %bb.0:
+; CHECK-FAST-ISEL-NEXT:    add w8, w0, #7
+; CHECK-FAST-ISEL-NEXT:    cmp w0, #0
+; CHECK-FAST-ISEL-NEXT:    csel w8, w8, w0, lt
+; CHECK-FAST-ISEL-NEXT:    neg w0, w8, asr #3
+; CHECK-FAST-ISEL-NEXT:    ret
   %1 = sdiv i32 %a, -8
   ret i32 %1
 }
@@ -40,6 +61,11 @@ define i64 @sdiv_i64_exact(i64 %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    asr x0, x0, #4
 ; CHECK-NEXT:    ret
+;
+; CHECK-FAST-ISEL-LABEL: sdiv_i64_exact:
+; CHECK-FAST-ISEL:       // %bb.0:
+; CHECK-FAST-ISEL-NEXT:    asr x0, x0, #4
+; CHECK-FAST-ISEL-NEXT:    ret
   %1 = sdiv exact i64 %a, 16
   ret i64 %1
 }
@@ -49,9 +75,17 @@ define i64 @sdiv_i64_pos(i64 %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #15
 ; CHECK-NEXT:    cmp x0, #0
-; CHECK-NEXT:    csel x8, x8, x0, lt
+; CHECK-NEXT:    csel x8, x8, x0, mi
 ; CHECK-NEXT:    asr x0, x8, #4
 ; CHECK-NEXT:    ret
+;
+; CHECK-FAST-ISEL-LABEL: sdiv_i64_pos:
+; CHECK-FAST-ISEL:       // %bb.0:
+; CHECK-FAST-ISEL-NEXT:    add x8, x0, #15
+; CHECK-FAST-ISEL-NEXT:    cmp x0, #0
+; CHECK-FAST-ISEL-NEXT:    csel x8, x8, x0, lt
+; CHECK-FAST-ISEL-NEXT:    asr x0, x8, #4
+; CHECK-FAST-ISEL-NEXT:    ret
   %1 = sdiv i64 %a, 16
   ret i64 %1
 }
@@ -61,9 +95,17 @@ define i64 @sdiv_i64_neg(i64 %a) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    add x8, x0, #15
 ; CHECK-NEXT:    cmp x0, #0
-; CHECK-NEXT:    csel x8, x8, x0, lt
+; CHECK-NEXT:    csel x8, x8, x0, mi
 ; CHECK-NEXT:    neg x0, x8, asr #4
 ; CHECK-NEXT:    ret
+;
+; CHECK-FAST-ISEL-LABEL: sdiv_i64_neg:
+; CHECK-FAST-ISEL:       // %bb.0:
+; CHECK-FAST-ISEL-NEXT:    add x8, x0, #15
+; CHECK-FAST-ISEL-NEXT:    cmp x0, #0
+; CHECK-FAST-ISEL-NEXT:    csel x8, x8, x0, lt
+; CHECK-FAST-ISEL-NEXT:    neg x0, x8, asr #4
+; CHECK-FAST-ISEL-NEXT:    ret
   %1 = sdiv i64 %a, -16
   ret i64 %1
 }
diff --git a/llvm/test/CodeGen/AArch64/fcmp-fp128.ll b/llvm/test/CodeGen/AArch64/fcmp-fp128.ll
index 503cb8c533bab..a2b4b61864741 100644
--- a/llvm/test/CodeGen/AArch64/fcmp-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp-fp128.ll
@@ -98,7 +98,7 @@ define double @olt(fp128 %a, fp128 %b, double %d, double %e) {
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    fcsel d0, d9, d8, lt
+; CHECK-SD-NEXT:    fcsel d0, d9, d8, mi
 ; CHECK-SD-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ret
 ;
@@ -115,7 +115,7 @@ define double @olt(fp128 %a, fp128 %b, double %d, double %e) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-GI-NEXT:    fcsel d0, d8, d9, lt
+; CHECK-GI-NEXT:    fcsel d0, d8, d9, mi
 ; CHECK-GI-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ret
 entry:
@@ -412,7 +412,7 @@ define double @uge(fp128 %a, fp128 %b, double %d, double %e) {
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    fcsel d0, d9, d8, ge
+; CHECK-SD-NEXT:    fcsel d0, d9, d8, pl
 ; CHECK-SD-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ret
 ;
@@ -429,7 +429,7 @@ define double @uge(fp128 %a, fp128 %b, double %d, double %e) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-GI-NEXT:    fcsel d0, d8, d9, ge
+; CHECK-GI-NEXT:    fcsel d0, d8, d9, pl
 ; CHECK-GI-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ret
 entry:
@@ -452,7 +452,7 @@ define double @ult(fp128 %a, fp128 %b, double %d, double %e) {
 ; CHECK-SD-NEXT:    bl __getf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    fcsel d0, d9, d8, lt
+; CHECK-SD-NEXT:    fcsel d0, d9, d8, mi
 ; CHECK-SD-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ret
 ;
@@ -469,7 +469,7 @@ define double @ult(fp128 %a, fp128 %b, double %d, double %e) {
 ; CHECK-GI-NEXT:    bl __getf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-GI-NEXT:    fcsel d0, d8, d9, lt
+; CHECK-GI-NEXT:    fcsel d0, d8, d9, mi
 ; CHECK-GI-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/fcmp.ll b/llvm/test/CodeGen/AArch64/fcmp.ll
index f33f57e8751ca..6d673f1204c7f 100644
--- a/llvm/test/CodeGen/AArch64/fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp.ll
@@ -15,7 +15,7 @@ define fp128 @f128_fp128(fp128 %a, fp128 %b, fp128 %d, fp128 %e) {
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    b.ge .LBB0_2
+; CHECK-SD-NEXT:    b.pl .LBB0_2
 ; CHECK-SD-NEXT:  // %bb.1: // %entry
 ; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:  .LBB0_2: // %entry
@@ -36,9 +36,9 @@ define fp128 @f128_fp128(fp128 %a, fp128 %b, fp128 %d, fp128 %e) {
 ; CHECK-GI-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-GI-NEXT:    mov d0, v2.d[1]
 ; CHECK-GI-NEXT:    mov d1, v3.d[1]
-; CHECK-GI-NEXT:    fcsel d2, d2, d3, lt
+; CHECK-GI-NEXT:    fcsel d2, d2, d3, mi
 ; CHECK-GI-NEXT:    fmov x8, d2
-; CHECK-GI-NEXT:    fcsel d1, d0, d1, lt
+; CHECK-GI-NEXT:    fcsel d1, d0, d1, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
 ; CHECK-GI-NEXT:    fmov x8, d1
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
@@ -71,13 +71,13 @@ define i128 @f128_i128(fp128 %a, fp128 %b, i128 %d, i128 %e) {
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    csel x20, x22, x20, lt
+; CHECK-SD-NEXT:    csel x20, x22, x20, mi
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    mov w8, w0
 ; CHECK-SD-NEXT:    mov x0, x20
 ; CHECK-SD-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w8, #0
-; CHECK-SD-NEXT:    csel x1, x21, x19, lt
+; CHECK-SD-NEXT:    csel x1, x21, x19, mi
 ; CHECK-SD-NEXT:    ldp x20, x19, [sp, #64] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldp x22, x21, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    add sp, sp, #80
@@ -100,8 +100,8 @@ define i128 @f128_i128(fp128 %a, fp128 %b, i128 %d, i128 %e) {
 ; CHECK-GI-NEXT:    mov x22, x3
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x0, x19, x21, lt
-; CHECK-GI-NEXT:    csel x1, x20, x22, lt
+; CHECK-GI-NEXT:    csel x0, x19, x21, mi
+; CHECK-GI-NEXT:    csel x1, x20, x22, mi
 ; CHECK-GI-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ldp x22, x21, [sp, #16] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ldr x30, [sp], #48 // 8-byte Folded Reload
@@ -126,7 +126,7 @@ define double @f128_double(fp128 %a, fp128 %b, double %d, double %e) {
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    fcsel d0, d9, d8, lt
+; CHECK-SD-NEXT:    fcsel d0, d9, d8, mi
 ; CHECK-SD-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ret
 ;
@@ -143,7 +143,7 @@ define double @f128_double(fp128 %a, fp128 %b, double %d, double %e) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-GI-NEXT:    fcsel d0, d8, d9, lt
+; CHECK-GI-NEXT:    fcsel d0, d8, d9, mi
 ; CHECK-GI-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ret
 entry:
@@ -166,7 +166,7 @@ define float @f128_float(fp128 %a, fp128 %b, float %d, float %e) {
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    fcsel s0, s9, s8, lt
+; CHECK-SD-NEXT:    fcsel s0, s9, s8, mi
 ; CHECK-SD-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ret
 ;
@@ -183,7 +183,7 @@ define float @f128_float(fp128 %a, fp128 %b, float %d, float %e) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-GI-NEXT:    fcsel s0, s8, s9, lt
+; CHECK-GI-NEXT:    fcsel s0, s8, s9, mi
 ; CHECK-GI-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ret
 entry:
@@ -205,7 +205,7 @@ define i32 @f128_i32(fp128 %a, fp128 %b, i32 %d, i32 %e) {
 ; CHECK-SD-NEXT:    mov w20, w0
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    csel w0, w20, w19, lt
+; CHECK-SD-NEXT:    csel w0, w20, w19, mi
 ; CHECK-SD-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
 ; CHECK-SD-NEXT:    ret
@@ -222,7 +222,7 @@ define i32 @f128_i32(fp128 %a, fp128 %b, i32 %d, i32 %e) {
 ; CHECK-GI-NEXT:    mov w20, w1
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w19, w20, lt
+; CHECK-GI-NEXT:    csel w0, w19, w20, mi
 ; CHECK-GI-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
 ; CHECK-GI-NEXT:    ret
@@ -246,7 +246,7 @@ define half @f128_half(fp128 %a, fp128 %b, half %d, half %e) {
 ; CHECK-SD-NOFP16-NEXT:    bl __lttf2
 ; CHECK-SD-NOFP16-NEXT:    cmp w0, #0
 ; CHECK-SD-NOFP16-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-SD-NOFP16-NEXT:    fcsel s0, s9, s8, lt
+; CHECK-SD-NOFP16-NEXT:    fcsel s0, s9, s8, mi
 ; CHECK-SD-NOFP16-NEXT:    // kill: def $h0 killed $h0 killed $s0
 ; CHECK-SD-NOFP16-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-SD-NOFP16-NEXT:    ret
@@ -264,7 +264,7 @@ define half @f128_half(fp128 %a, fp128 %b, half %d, half %e) {
 ; CHECK-SD-FP16-NEXT:    bl __lttf2
 ; CHECK-SD-FP16-NEXT:    cmp w0, #0
 ; CHECK-SD-FP16-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-SD-FP16-NEXT:    fcsel h0, h9, h8, lt
+; CHECK-SD-FP16-NEXT:    fcsel h0, h9, h8, mi
 ; CHECK-SD-FP16-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
 ; CHECK-SD-FP16-NEXT:    ret
 ;
@@ -283,7 +283,7 @@ define half @f128_half(fp128 %a, fp128 %b, half %d, half %e) {
 ; CHECK-GI-NEXT:    fmov w9, s9
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-GI-NEXT:    csel w8, w8, w9, lt
+; CHECK-GI-NEXT:    csel w8, w8, w9, mi
 ; CHECK-GI-NEXT:    fmov s0, w8
 ; CHECK-GI-NEXT:    // kill: def $h0 killed $h0 killed $s0
 ; CHECK-GI-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
@@ -438,7 +438,7 @@ define <2 x fp128> @v2f128_fp128(<2 x fp128> %a, <2 x fp128> %b, <2 x fp128> %d,
 ; CHECK-SD-NEXT:    stp q7, q6, [sp, #64] // 32-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    b.ge .LBB12_2
+; CHECK-SD-NEXT:    b.pl .LBB12_2
 ; CHECK-SD-NEXT:  // %bb.1: // %entry
 ; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    str q0, [sp, #80] // 16-byte Folded Spill
@@ -447,7 +447,7 @@ define <2 x fp128> @v2f128_fp128(<2 x fp128> %a, <2 x fp128> %b, <2 x fp128> %d,
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    ldr q1, [sp, #64] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    b.ge .LBB12_4
+; CHECK-SD-NEXT:    b.pl .LBB12_4
 ; CHECK-SD-NEXT:  // %bb.3: // %entry
 ; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:  .LBB12_4: // %entry
@@ -476,18 +476,18 @@ define <2 x fp128> @v2f128_fp128(<2 x fp128> %a, <2 x fp128> %b, <2 x fp128> %d,
 ; CHECK-GI-NEXT:    ldp x30, x19, [sp, #96] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    mov d0, v2.d[1]
 ; CHECK-GI-NEXT:    mov d1, v3.d[1]
-; CHECK-GI-NEXT:    fcsel d2, d2, d3, lt
+; CHECK-GI-NEXT:    fcsel d2, d2, d3, mi
 ; CHECK-GI-NEXT:    fmov x8, d2
-; CHECK-GI-NEXT:    fcsel d3, d0, d1, lt
+; CHECK-GI-NEXT:    fcsel d3, d0, d1, mi
 ; CHECK-GI-NEXT:    ldp q5, q0, [sp, #64] // 32-byte Folded Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    mov d1, v0.d[1]
 ; CHECK-GI-NEXT:    mov d4, v5.d[1]
-; CHECK-GI-NEXT:    fcsel d0, d0, d5, lt
+; CHECK-GI-NEXT:    fcsel d0, d0, d5, mi
 ; CHECK-GI-NEXT:    fmov x9, d0
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
 ; CHECK-GI-NEXT:    fmov x8, d3
-; CHECK-GI-NEXT:    fcsel d2, d1, d4, lt
+; CHECK-GI-NEXT:    fcsel d2, d1, d4, mi
 ; CHECK-GI-NEXT:    mov v1.d[0], x9
 ; CHECK-GI-NEXT:    fmov x9, d2
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
@@ -513,7 +513,7 @@ define <3 x fp128> @v3f128_fp128(<3 x fp128> %a, <3 x fp128> %b, <3 x fp128> %d,
 ; CHECK-SD-NEXT:    stp q6, q7, [sp, #64] // 32-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    b.lt .LBB13_2
+; CHECK-SD-NEXT:    b.mi .LBB13_2
 ; CHECK-SD-NEXT:  // %bb.1:
 ; CHECK-SD-NEXT:    ldr q0, [sp, #128]
 ; CHECK-SD-NEXT:    str q0, [sp, #64] // 16-byte Folded Spill
@@ -521,7 +521,7 @@ define <3 x fp128> @v3f128_fp128(<3 x fp128> %a, <3 x fp128> %b, <3 x fp128> %d,
 ; CHECK-SD-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    b.lt .LBB13_4
+; CHECK-SD-NEXT:    b.mi .LBB13_4
 ; CHECK-SD-NEXT:  // %bb.3:
 ; CHECK-SD-NEXT:    ldr q0, [sp, #144]
 ; CHECK-SD-NEXT:    str q0, [sp, #80] // 16-byte Folded Spill
@@ -531,7 +531,7 @@ define <3 x fp128> @v3f128_fp128(<3 x fp128> %a, <3 x fp128> %b, <3 x fp128> %d,
 ; CHECK-SD-NEXT:    add x8, sp, #160
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    add x9, sp, #112
-; CHECK-SD-NEXT:    csel x8, x9, x8, lt
+; CHECK-SD-NEXT:    csel x8, x9, x8, mi
 ; CHECK-SD-NEXT:    ldp q0, q1, [sp, #64] // 32-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q2, [x8]
 ; CHECK-SD-NEXT:    ldr x30, [sp, #96] // 8-byte Folded Reload
@@ -571,24 +571,24 @@ define <3 x fp128> @v3f128_fp128(<3 x fp128> %a, <3 x fp128> %b, <3 x fp128> %d,
 ; CHECK-GI-NEXT:    ldr x30, [sp, #160] // 8-byte Folded Reload
 ; CHECK-GI-NEXT:    mov d0, v4.d[1]
 ; CHECK-GI-NEXT:    mov d1, v5.d[1]
-; CHECK-GI-NEXT:    fcsel d4, d4, d5, lt
+; CHECK-GI-NEXT:    fcsel d4, d4, d5, mi
 ; CHECK-GI-NEXT:    mov d2, v7.d[1]
 ; CHECK-GI-NEXT:    mov d3, v6.d[1]
 ; CHECK-GI-NEXT:    fmov x8, d4
-; CHECK-GI-NEXT:    fcsel d5, d0, d1, lt
+; CHECK-GI-NEXT:    fcsel d5, d0, d1, mi
 ; CHECK-GI-NEXT:    cmp w20, #0
-; CHECK-GI-NEXT:    fcsel d1, d7, d6, lt
+; CHECK-GI-NEXT:    fcsel d1, d7, d6, mi
 ; CHECK-GI-NEXT:    ldp q7, q0, [sp, #128] // 32-byte Folded Reload
-; CHECK-GI-NEXT:    fcsel d3, d2, d3, lt
+; CHECK-GI-NEXT:    fcsel d3, d2, d3, mi
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    ldp x20, x19, [sp, #176] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    mov d2, v0.d[1]
 ; CHECK-GI-NEXT:    mov d6, v7.d[1]
-; CHECK-GI-NEXT:    fcsel d7, d0, d7, lt
+; CHECK-GI-NEXT:    fcsel d7, d0, d7, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
 ; CHECK-GI-NEXT:    fmov x8, d1
 ; CHECK-GI-NEXT:    fmov x9, d7
-; CHECK-GI-NEXT:    fcsel d4, d2, d6, lt
+; CHECK-GI-NEXT:    fcsel d4, d2, d6, mi
 ; CHECK-GI-NEXT:    mov v1.d[0], x8
 ; CHECK-GI-NEXT:    fmov x8, d5
 ; CHECK-GI-NEXT:    mov v2.d[0], x9
@@ -621,7 +621,7 @@ define <2 x double> @v2f128_double(<2 x fp128> %a, <2 x fp128> %b, <2 x double>
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr q1, [sp, #32] // 16-byte Folded Reload
-; CHECK-SD-NEXT:    cset w8, lt
+; CHECK-SD-NEXT:    cset w8, mi
 ; CHECK-SD-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-SD-NEXT:    fmov d0, x8
 ; CHECK-SD-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
@@ -630,7 +630,7 @@ define <2 x double> @v2f128_double(<2 x fp128> %a, <2 x fp128> %b, <2 x double>
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr x30, [sp, #80] // 8-byte Folded Reload
-; CHECK-SD-NEXT:    cset w8, lt
+; CHECK-SD-NEXT:    cset w8, mi
 ; CHECK-SD-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-SD-NEXT:    fmov d0, x8
 ; CHECK-SD-NEXT:    mov v0.d[1], v1.d[0]
@@ -652,11 +652,11 @@ define <2 x double> @v2f128_double(<2 x fp128> %a, <2 x fp128> %b, <2 x double>
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    ldp q1, q0, [sp] // 32-byte Folded Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w19, lt
+; CHECK-GI-NEXT:    cset w19, mi
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    fmov d0, x19
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w8, lt
+; CHECK-GI-NEXT:    cset w8, mi
 ; CHECK-GI-NEXT:    ldp q2, q1, [sp, #32] // 32-byte Folded Reload
 ; CHECK-GI-NEXT:    ldp x30, x19, [sp, #64] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
@@ -696,7 +696,7 @@ define <3 x double> @v3f128_double(<3 x fp128> %a, <3 x fp128> %b, <3 x double>
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr q1, [sp, #64] // 16-byte Folded Reload
-; CHECK-SD-NEXT:    cset w8, lt
+; CHECK-SD-NEXT:    cset w8, mi
 ; CHECK-SD-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-SD-NEXT:    fmov d0, x8
 ; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
@@ -704,7 +704,7 @@ define <3 x double> @v3f128_double(<3 x fp128> %a, <3 x fp128> %b, <3 x double>
 ; CHECK-SD-NEXT:    bl __lttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-SD-NEXT:    cset w8, lt
+; CHECK-SD-NEXT:    cset w8, mi
 ; CHECK-SD-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-SD-NEXT:    fmov d1, x8
 ; CHECK-SD-NEXT:    mov v1.d[1], v0.d[0]
@@ -714,7 +714,7 @@ define <3 x double> @v3f128_double(<3 x fp128> %a, <3 x fp128> %b, <3 x double>
 ; CHECK-SD-NEXT:    ldp q1, q0, [sp, #32] // 32-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldp q2, q4, [sp, #64] // 32-byte Folded Reload
-; CHECK-SD-NEXT:    cset w8, lt
+; CHECK-SD-NEXT:    cset w8, mi
 ; CHECK-SD-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-SD-NEXT:    ldr q3, [sp, #96] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr x30, [sp, #144] // 8-byte Folded Reload
@@ -755,11 +755,11 @@ define <3 x double> @v3f128_double(<3 x fp128> %a, <3 x fp128> %b, <3 x double>
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    ldp q1, q0, [sp] // 32-byte Folded Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w21, lt
+; CHECK-GI-NEXT:    cset w21, mi
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    ldp q1, q0, [sp, #32] // 32-byte Folded Reload
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w22, lt
+; CHECK-GI-NEXT:    cset w22, mi
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    ldp q0, q2, [sp, #64] // 32-byte Folded Reload
 ; CHECK-GI-NEXT:    sbfx x8, x21, #0, #1
@@ -770,7 +770,7 @@ define <3 x double> @v3f128_double(<3 x fp128> %a, <3 x fp128> %b, <3 x double>
 ; CHECK-GI-NEXT:    ldr x30, [sp, #128] // 8-byte Folded Reload
 ; CHECK-GI-NEXT:    mov v2.d[1], v0.d[0]
 ; CHECK-GI-NEXT:    fmov d0, x8
-; CHECK-GI-NEXT:    cset w8, lt
+; CHECK-GI-NEXT:    cset w8, mi
 ; CHECK-GI-NEXT:    mov v3.d[1], v4.d[0]
 ; CHECK-GI-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-GI-NEXT:    mov v1.d[1], x9
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index f2a14a9b73fa1..919585a9826b9 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -2440,7 +2440,7 @@ define i32 @fcmp_olt_f128(fp128 %a, fp128 %b) #0 {
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl __lttf2
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    cset w0, mi
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0
@@ -2488,7 +2488,7 @@ define i32 @fcmp_oge_f128(fp128 %a, fp128 %b) #0 {
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl __getf2
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    cset w0, pl
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0
@@ -2544,7 +2544,7 @@ define i32 @fcmp_ult_f128(fp128 %a, fp128 %b) #0 {
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl __getf2
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    cset w0, mi
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0
@@ -2592,7 +2592,7 @@ define i32 @fcmp_uge_f128(fp128 %a, fp128 %b) #0 {
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl __lttf2
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    cset w0, pl
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmp.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0
@@ -2648,7 +2648,7 @@ define i32 @fcmps_olt_f128(fp128 %a, fp128 %b) #0 {
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl __lttf2
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    cset w0, mi
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"olt", metadata !"fpexcept.strict") #0
@@ -2696,7 +2696,7 @@ define i32 @fcmps_oge_f128(fp128 %a, fp128 %b) #0 {
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl __getf2
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    cset w0, pl
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"oge", metadata !"fpexcept.strict") #0
@@ -2752,7 +2752,7 @@ define i32 @fcmps_ult_f128(fp128 %a, fp128 %b) #0 {
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl __getf2
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cset w0, lt
+; CHECK-NEXT:    cset w0, mi
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"ult", metadata !"fpexcept.strict") #0
@@ -2800,7 +2800,7 @@ define i32 @fcmps_uge_f128(fp128 %a, fp128 %b) #0 {
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl __lttf2
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cset w0, ge
+; CHECK-NEXT:    cset w0, pl
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
   %cmp = call i1 @llvm.experimental.constrained.fcmps.f128(fp128 %a, fp128 %b, metadata !"uge", metadata !"fpexcept.strict") #0
diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat.ll b/llvm/test/CodeGen/AArch64/fpclamptosat.ll
index 06dc11d413fae..00de1530fb72c 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat.ll
@@ -903,7 +903,7 @@ define i64 @ustest_f64i64_mm(double %x) {
 ; CHECK-NEXT:    csinc x8, x1, xzr, lt
 ; CHECK-NEXT:    csel x9, x0, xzr, lt
 ; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    csel x0, xzr, x9, lt
+; CHECK-NEXT:    csel x0, xzr, x9, mi
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -956,7 +956,7 @@ define i64 @ustest_f32i64_mm(float %x) {
 ; CHECK-NEXT:    csinc x8, x1, xzr, lt
 ; CHECK-NEXT:    csel x9, x0, xzr, lt
 ; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    csel x0, xzr, x9, lt
+; CHECK-NEXT:    csel x0, xzr, x9, mi
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
@@ -1015,7 +1015,7 @@ define i64 @ustest_f16i64_mm(half %x) {
 ; CHECK-NEXT:    csinc x8, x1, xzr, lt
 ; CHECK-NEXT:    csel x9, x0, xzr, lt
 ; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    csel x0, xzr, x9, lt
+; CHECK-NEXT:    csel x0, xzr, x9, mi
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
index 9157bcba59e9b..83ea72c865283 100644
--- a/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/AArch64/fpclamptosat_vec.ll
@@ -1009,9 +1009,9 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
 ; CHECK-NEXT:    csel x11, x19, xzr, lt
 ; CHECK-NEXT:    cmp x10, #0
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    csel x10, xzr, x11, lt
+; CHECK-NEXT:    csel x10, xzr, x11, mi
 ; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    csel x8, xzr, x8, lt
+; CHECK-NEXT:    csel x8, xzr, x8, mi
 ; CHECK-NEXT:    fmov d0, x10
 ; CHECK-NEXT:    fmov d1, x8
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
@@ -1104,9 +1104,9 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
 ; CHECK-NEXT:    csel x11, x19, xzr, lt
 ; CHECK-NEXT:    cmp x10, #0
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    csel x10, xzr, x11, lt
+; CHECK-NEXT:    csel x10, xzr, x11, mi
 ; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    csel x8, xzr, x8, lt
+; CHECK-NEXT:    csel x8, xzr, x8, mi
 ; CHECK-NEXT:    fmov d0, x10
 ; CHECK-NEXT:    fmov d1, x8
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
@@ -1215,9 +1215,9 @@ define <2 x i64> @ustest_f16i64_mm(<2 x half> %x) {
 ; CHECK-NEXT:    csel x11, x19, xzr, lt
 ; CHECK-NEXT:    cmp x10, #0
 ; CHECK-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
-; CHECK-NEXT:    csel x10, xzr, x11, lt
+; CHECK-NEXT:    csel x10, xzr, x11, mi
 ; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    csel x8, xzr, x8, lt
+; CHECK-NEXT:    csel x8, xzr, x8, mi
 ; CHECK-NEXT:    fmov d0, x10
 ; CHECK-NEXT:    fmov d1, x8
 ; CHECK-NEXT:    mov v0.d[1], v1.d[0]
diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll
index 39e2db3a52d2c..e3aef487890f9 100644
--- a/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll
+++ b/llvm/test/CodeGen/AArch64/fptosi-sat-scalar.ll
@@ -24,7 +24,7 @@ define i1 @test_signed_i1_f32(float %f) nounwind {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    fcvtzs w8, s0
 ; CHECK-SD-NEXT:    ands w8, w8, w8, asr #31
-; CHECK-SD-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-SD-NEXT:    csinv w8, w8, wzr, pl
 ; CHECK-SD-NEXT:    and w0, w8, #0x1
 ; CHECK-SD-NEXT:    ret
 ;
@@ -32,9 +32,9 @@ define i1 @test_signed_i1_f32(float %f) nounwind {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    fcvtzs w8, s0
 ; CHECK-GI-NEXT:    cmp w8, #0
-; CHECK-GI-NEXT:    csel w8, w8, wzr, lt
+; CHECK-GI-NEXT:    csel w8, w8, wzr, mi
 ; CHECK-GI-NEXT:    cmp w8, #0
-; CHECK-GI-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-GI-NEXT:    csinv w8, w8, wzr, pl
 ; CHECK-GI-NEXT:    and w0, w8, #0x1
 ; CHECK-GI-NEXT:    ret
     %x = call i1 @llvm.fptosi.sat.i1.f32(float %f)
@@ -269,7 +269,7 @@ define i1 @test_signed_i1_f64(double %f) nounwind {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    fcvtzs w8, d0
 ; CHECK-SD-NEXT:    ands w8, w8, w8, asr #31
-; CHECK-SD-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-SD-NEXT:    csinv w8, w8, wzr, pl
 ; CHECK-SD-NEXT:    and w0, w8, #0x1
 ; CHECK-SD-NEXT:    ret
 ;
@@ -277,9 +277,9 @@ define i1 @test_signed_i1_f64(double %f) nounwind {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    fcvtzs w8, d0
 ; CHECK-GI-NEXT:    cmp w8, #0
-; CHECK-GI-NEXT:    csel w8, w8, wzr, lt
+; CHECK-GI-NEXT:    csel w8, w8, wzr, mi
 ; CHECK-GI-NEXT:    cmp w8, #0
-; CHECK-GI-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-GI-NEXT:    csinv w8, w8, wzr, pl
 ; CHECK-GI-NEXT:    and w0, w8, #0x1
 ; CHECK-GI-NEXT:    ret
     %x = call i1 @llvm.fptosi.sat.i1.f64(double %f)
@@ -519,7 +519,7 @@ define i1 @test_signed_i1_f16(half %f) nounwind {
 ; CHECK-SD-CVT-NEXT:    fcvt s0, h0
 ; CHECK-SD-CVT-NEXT:    fcvtzs w8, s0
 ; CHECK-SD-CVT-NEXT:    ands w8, w8, w8, asr #31
-; CHECK-SD-CVT-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-SD-CVT-NEXT:    csinv w8, w8, wzr, pl
 ; CHECK-SD-CVT-NEXT:    and w0, w8, #0x1
 ; CHECK-SD-CVT-NEXT:    ret
 ;
@@ -527,7 +527,7 @@ define i1 @test_signed_i1_f16(half %f) nounwind {
 ; CHECK-SD-FP16:       // %bb.0:
 ; CHECK-SD-FP16-NEXT:    fcvtzs w8, h0
 ; CHECK-SD-FP16-NEXT:    ands w8, w8, w8, asr #31
-; CHECK-SD-FP16-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-SD-FP16-NEXT:    csinv w8, w8, wzr, pl
 ; CHECK-SD-FP16-NEXT:    and w0, w8, #0x1
 ; CHECK-SD-FP16-NEXT:    ret
 ;
@@ -536,9 +536,9 @@ define i1 @test_signed_i1_f16(half %f) nounwind {
 ; CHECK-GI-CVT-NEXT:    fcvt s0, h0
 ; CHECK-GI-CVT-NEXT:    fcvtzs w8, s0
 ; CHECK-GI-CVT-NEXT:    cmp w8, #0
-; CHECK-GI-CVT-NEXT:    csel w8, w8, wzr, lt
+; CHECK-GI-CVT-NEXT:    csel w8, w8, wzr, mi
 ; CHECK-GI-CVT-NEXT:    cmp w8, #0
-; CHECK-GI-CVT-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-GI-CVT-NEXT:    csinv w8, w8, wzr, pl
 ; CHECK-GI-CVT-NEXT:    and w0, w8, #0x1
 ; CHECK-GI-CVT-NEXT:    ret
 ;
@@ -546,9 +546,9 @@ define i1 @test_signed_i1_f16(half %f) nounwind {
 ; CHECK-GI-FP16:       // %bb.0:
 ; CHECK-GI-FP16-NEXT:    fcvtzs w8, h0
 ; CHECK-GI-FP16-NEXT:    cmp w8, #0
-; CHECK-GI-FP16-NEXT:    csel w8, w8, wzr, lt
+; CHECK-GI-FP16-NEXT:    csel w8, w8, wzr, mi
 ; CHECK-GI-FP16-NEXT:    cmp w8, #0
-; CHECK-GI-FP16-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-GI-FP16-NEXT:    csinv w8, w8, wzr, pl
 ; CHECK-GI-FP16-NEXT:    and w0, w8, #0x1
 ; CHECK-GI-FP16-NEXT:    ret
     %x = call i1 @llvm.fptosi.sat.i1.f16(half %f)
@@ -959,7 +959,7 @@ define i32 @test_signed_f128_i32(fp128 %f) {
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    mov w8, #-2147483648 // =0x80000000
 ; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-SD-NEXT:    csel w19, w8, w0, lt
+; CHECK-SD-NEXT:    csel w19, w8, w0, mi
 ; CHECK-SD-NEXT:    adrp x8, .LCPI30_1
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI30_1]
 ; CHECK-SD-NEXT:    bl __gttf2
@@ -1001,11 +1001,11 @@ define i32 @test_signed_f128_i32(fp128 %f) {
 ; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI30_0]
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
 ; CHECK-GI-NEXT:    mov x8, #281474976448512 // =0xfffffffc0000
 ; CHECK-GI-NEXT:    movk x8, #16413, lsl #48
-; CHECK-GI-NEXT:    csel x8, x20, x8, lt
+; CHECK-GI-NEXT:    csel x8, x20, x8, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
index 9c21d2bf083a2..77dd6c6425207 100644
--- a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
@@ -499,7 +499,7 @@ define <1 x i32> @test_signed_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    mov w8, #-2147483648 // =0x80000000
 ; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-SD-NEXT:    csel w19, w8, w0, lt
+; CHECK-SD-NEXT:    csel w19, w8, w0, mi
 ; CHECK-SD-NEXT:    adrp x8, .LCPI14_1
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI14_1]
 ; CHECK-SD-NEXT:    bl __gttf2
@@ -542,11 +542,11 @@ define <1 x i32> @test_signed_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI14_0]
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
 ; CHECK-GI-NEXT:    mov x8, #281474976448512 // =0xfffffffc0000
 ; CHECK-GI-NEXT:    movk x8, #16413, lsl #48
-; CHECK-GI-NEXT:    csel x8, x20, x8, lt
+; CHECK-GI-NEXT:    csel x8, x20, x8, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
@@ -592,7 +592,7 @@ define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI15_1]
 ; CHECK-SD-NEXT:    mov w20, #-2147483648 // =0x80000000
-; CHECK-SD-NEXT:    csel w19, w20, w0, lt
+; CHECK-SD-NEXT:    csel w19, w20, w0, mi
 ; CHECK-SD-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
@@ -612,7 +612,7 @@ define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, w20, w0, lt
+; CHECK-SD-NEXT:    csel w19, w20, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
@@ -664,10 +664,10 @@ define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    mov x22, #281474976448512 // =0xfffffffc0000
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    movk x22, #16413, lsl #48
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x21, x22, lt
+; CHECK-GI-NEXT:    csel x8, x21, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
@@ -690,9 +690,9 @@ define <2 x i32> @test_signed_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x20
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x20, x22, lt
+; CHECK-GI-NEXT:    csel x8, x20, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -742,7 +742,7 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI16_1]
 ; CHECK-SD-NEXT:    mov w20, #-2147483648 // =0x80000000
-; CHECK-SD-NEXT:    csel w19, w20, w0, lt
+; CHECK-SD-NEXT:    csel w19, w20, w0, mi
 ; CHECK-SD-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
@@ -762,7 +762,7 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, w20, w0, lt
+; CHECK-SD-NEXT:    csel w19, w20, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
@@ -780,7 +780,7 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, w20, w0, lt
+; CHECK-SD-NEXT:    csel w19, w20, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
@@ -833,10 +833,10 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    mov x22, #281474976448512 // =0xfffffffc0000
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    movk x22, #16413, lsl #48
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x21, x22, lt
+; CHECK-GI-NEXT:    csel x8, x21, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
@@ -858,9 +858,9 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x23
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x23, x22, lt
+; CHECK-GI-NEXT:    csel x8, x23, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -883,9 +883,9 @@ define <3 x i32> @test_signed_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x20
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x20, x22, lt
+; CHECK-GI-NEXT:    csel x8, x20, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
@@ -936,7 +936,7 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI17_1]
 ; CHECK-SD-NEXT:    mov w20, #-2147483648 // =0x80000000
-; CHECK-SD-NEXT:    csel w19, w20, w0, lt
+; CHECK-SD-NEXT:    csel w19, w20, w0, mi
 ; CHECK-SD-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
@@ -955,7 +955,7 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, w20, w0, lt
+; CHECK-SD-NEXT:    csel w19, w20, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
@@ -976,7 +976,7 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, w20, w0, lt
+; CHECK-SD-NEXT:    csel w19, w20, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
@@ -996,7 +996,7 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #80] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, w20, w0, lt
+; CHECK-SD-NEXT:    csel w19, w20, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #80] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
@@ -1049,10 +1049,10 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    mov x22, #281474976448512 // =0xfffffffc0000
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    movk x22, #16413, lsl #48
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x21, x22, lt
+; CHECK-GI-NEXT:    csel x8, x21, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
@@ -1075,9 +1075,9 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x23
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x23, x22, lt
+; CHECK-GI-NEXT:    csel x8, x23, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
@@ -1099,9 +1099,9 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x24
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x24, x22, lt
+; CHECK-GI-NEXT:    csel x8, x24, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
@@ -1123,9 +1123,9 @@ define <4 x i32> @test_signed_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x20
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x20, x22, lt
+; CHECK-GI-NEXT:    csel x8, x20, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #80] // 16-byte Folded Reload
@@ -2359,9 +2359,9 @@ define <2 x i1> @test_signed_v2f64_v2i1(<2 x double> %f) {
 ; CHECK-SD-NEXT:    fcvtzs w9, d0
 ; CHECK-SD-NEXT:    fcvtzs w8, d1
 ; CHECK-SD-NEXT:    ands w8, w8, w8, asr #31
-; CHECK-SD-NEXT:    csinv w8, w8, wzr, ge
+; CHECK-SD-NEXT:    csinv w8, w8, wzr, pl
 ; CHECK-SD-NEXT:    ands w9, w9, w9, asr #31
-; CHECK-SD-NEXT:    csinv w9, w9, wzr, ge
+; CHECK-SD-NEXT:    csinv w9, w9, wzr, pl
 ; CHECK-SD-NEXT:    fmov s0, w9
 ; CHECK-SD-NEXT:    mov v0.s[1], w8
 ; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 killed $q0
@@ -5425,7 +5425,7 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI86_1]
 ; CHECK-SD-NEXT:    mov x20, #-9223372036854775808 // =0x8000000000000000
-; CHECK-SD-NEXT:    csel x19, x20, x0, lt
+; CHECK-SD-NEXT:    csel x19, x20, x0, mi
 ; CHECK-SD-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
@@ -5447,7 +5447,7 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel x19, x20, x0, lt
+; CHECK-SD-NEXT:    csel x19, x20, x0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w0, #0
@@ -5500,9 +5500,9 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov x22, #-1125899906842624 // =0xfffc000000000000
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    mov x23, #4629137466983448575 // =0x403dffffffffffff
-; CHECK-GI-NEXT:    csel x8, x19, x22, lt
+; CHECK-GI-NEXT:    csel x8, x19, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x21, x23, lt
+; CHECK-GI-NEXT:    csel x8, x21, x23, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfdi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
@@ -5525,9 +5525,9 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x20
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x19, x22, lt
+; CHECK-GI-NEXT:    csel x8, x19, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x20, x23, lt
+; CHECK-GI-NEXT:    csel x8, x20, x23, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixtfdi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
index 46950e7a60349..07e49e331415e 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-scalar.ll
@@ -777,7 +777,7 @@ define i32 @test_unsigned_f128_i32(fp128 %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI30_1]
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    csinv w0, w19, wzr, le
@@ -811,11 +811,11 @@ define i32 @test_unsigned_f128_i32(fp128 %f) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
 ; CHECK-GI-NEXT:    mov x8, #281474976579584 // =0xfffffffe0000
 ; CHECK-GI-NEXT:    movk x8, #16414, lsl #48
-; CHECK-GI-NEXT:    csel x8, x20, x8, lt
+; CHECK-GI-NEXT:    csel x8, x20, x8, mi
 ; CHECK-GI-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    add sp, sp, #48
diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
index 44847a41287d6..1b3a8a3b70e13 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
@@ -463,7 +463,7 @@ define <1 x i32> @test_unsigned_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI14_1]
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    csinv w8, w19, wzr, le
@@ -497,11 +497,11 @@ define <1 x i32> @test_unsigned_v1f128_v1i32(<1 x fp128> %f) {
 ; CHECK-GI-NEXT:    ldr q1, [x8, :lo12:.LCPI14_0]
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
 ; CHECK-GI-NEXT:    mov x8, #281474976579584 // =0xfffffffe0000
 ; CHECK-GI-NEXT:    movk x8, #16414, lsl #48
-; CHECK-GI-NEXT:    csel x8, x20, x8, lt
+; CHECK-GI-NEXT:    csel x8, x20, x8, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    ldp x20, x19, [sp, #32] // 16-byte Folded Reload
@@ -537,7 +537,7 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI15_1]
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
@@ -551,7 +551,7 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr x30, [sp, #64] // 8-byte Folded Reload
@@ -595,10 +595,10 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    mov x21, #281474976579584 // =0xfffffffe0000
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    movk x21, #16414, lsl #48
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x20, x21, lt
+; CHECK-GI-NEXT:    csel x8, x20, x21, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    ldp q1, q0, [sp, #16] // 32-byte Folded Reload
@@ -615,9 +615,9 @@ define <2 x i32> @test_unsigned_v2f128_v2i32(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x22
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x20, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x20, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x22, x21, lt
+; CHECK-GI-NEXT:    csel x8, x22, x21, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    fmov s0, w19
@@ -657,7 +657,7 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI16_1]
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldp q1, q0, [sp, #32] // 32-byte Folded Reload
@@ -670,7 +670,7 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp, #32] // 16-byte Folded Reload
@@ -683,7 +683,7 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #64] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    csinv w8, w19, wzr, le
@@ -727,10 +727,10 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    mov x21, #281474976579584 // =0xfffffffe0000
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    movk x21, #16414, lsl #48
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x20, x21, lt
+; CHECK-GI-NEXT:    csel x8, x20, x21, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
@@ -747,9 +747,9 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x22
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x20, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x20, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x22, x21, lt
+; CHECK-GI-NEXT:    csel x8, x22, x21, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    ldp q1, q0, [sp, #32] // 32-byte Folded Reload
@@ -766,9 +766,9 @@ define <3 x i32> @test_unsigned_v3f128_v3i32(<3 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x23
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x22, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x22, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x23, x21, lt
+; CHECK-GI-NEXT:    csel x8, x23, x21, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    fmov s0, w19
@@ -809,7 +809,7 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI17_1]
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    str q1, [sp, #48] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
@@ -823,7 +823,7 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr q1, [sp, #64] // 16-byte Folded Reload
@@ -838,7 +838,7 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-SD-NEXT:    bl __fixunstfsi
 ; CHECK-SD-NEXT:    ldp q0, q1, [sp, #32] // 32-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
@@ -853,7 +853,7 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #80] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel w19, wzr, w0, lt
+; CHECK-SD-NEXT:    csel w19, wzr, w0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
@@ -900,10 +900,10 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    mov x22, #281474976579584 // =0xfffffffe0000
-; CHECK-GI-NEXT:    csel x8, x19, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x19, xzr, mi
 ; CHECK-GI-NEXT:    movk x22, #16414, lsl #48
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x20, x22, lt
+; CHECK-GI-NEXT:    csel x8, x20, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
@@ -921,9 +921,9 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x21
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x20, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x20, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x21, x22, lt
+; CHECK-GI-NEXT:    csel x8, x21, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
@@ -941,9 +941,9 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x23
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x21, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x21, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x23, x22, lt
+; CHECK-GI-NEXT:    csel x8, x23, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
@@ -960,9 +960,9 @@ define <4 x i32> @test_unsigned_v4f128_v4i32(<4 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x24
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x23, xzr, lt
+; CHECK-GI-NEXT:    csel x8, x23, xzr, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x24, x22, lt
+; CHECK-GI-NEXT:    csel x8, x24, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfsi
 ; CHECK-GI-NEXT:    fmov s0, w19
@@ -4419,7 +4419,7 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #32] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
 ; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI86_1]
-; CHECK-SD-NEXT:    csel x19, xzr, x0, lt
+; CHECK-SD-NEXT:    csel x19, xzr, x0, mi
 ; CHECK-SD-NEXT:    str q1, [sp] // 16-byte Folded Spill
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
@@ -4435,7 +4435,7 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-SD-NEXT:    ldr q0, [sp, #48] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    ldr q1, [sp] // 16-byte Folded Reload
 ; CHECK-SD-NEXT:    cmp w19, #0
-; CHECK-SD-NEXT:    csel x19, xzr, x0, lt
+; CHECK-SD-NEXT:    csel x19, xzr, x0, mi
 ; CHECK-SD-NEXT:    bl __gttf2
 ; CHECK-SD-NEXT:    cmp w0, #0
 ; CHECK-SD-NEXT:    ldr q1, [sp, #32] // 16-byte Folded Reload
@@ -4480,9 +4480,9 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov x21, #-562949953421312 // =0xfffe000000000000
 ; CHECK-GI-NEXT:    cmp w0, #0
 ; CHECK-GI-NEXT:    mov x22, #4629418941960159231 // =0x403effffffffffff
-; CHECK-GI-NEXT:    csel x8, x19, x21, lt
+; CHECK-GI-NEXT:    csel x8, x19, x21, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x20, x22, lt
+; CHECK-GI-NEXT:    csel x8, x20, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfdi
 ; CHECK-GI-NEXT:    ldp q1, q0, [sp, #16] // 32-byte Folded Reload
@@ -4499,9 +4499,9 @@ define <2 x i64> @test_signed_v2f128_v2i64(<2 x fp128> %f) {
 ; CHECK-GI-NEXT:    mov v0.d[1], x23
 ; CHECK-GI-NEXT:    bl __lttf2
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x8, x20, x21, lt
+; CHECK-GI-NEXT:    csel x8, x20, x21, mi
 ; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    csel x8, x23, x22, lt
+; CHECK-GI-NEXT:    csel x8, x23, x22, mi
 ; CHECK-GI-NEXT:    mov v0.d[1], x8
 ; CHECK-GI-NEXT:    bl __fixunstfdi
 ; CHECK-GI-NEXT:    fmov d0, x19
diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
index 81f13b8e7439a..3b43b4209b08f 100644
--- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -256,7 +256,7 @@ define void @flag_setting() {
 ; CHECK-SD-NEXT:    b.gt .LBB2_4
 ; CHECK-SD-NEXT:  // %bb.1: // %test2
 ; CHECK-SD-NEXT:    tst x9, x10, lsl #63
-; CHECK-SD-NEXT:    b.lt .LBB2_4
+; CHECK-SD-NEXT:    b.mi .LBB2_4
 ; CHECK-SD-NEXT:  // %bb.2: // %test3
 ; CHECK-SD-NEXT:    tst x9, x10, asr #12
 ; CHECK-SD-NEXT:    b.gt .LBB2_4
@@ -277,7 +277,7 @@ define void @flag_setting() {
 ; CHECK-GI-NEXT:    b.gt .LBB2_4
 ; CHECK-GI-NEXT:  // %bb.1: // %test2
 ; CHECK-GI-NEXT:    tst x9, x10, lsl #63
-; CHECK-GI-NEXT:    b.lt .LBB2_4
+; CHECK-GI-NEXT:    b.mi .LBB2_4
 ; CHECK-GI-NEXT:  // %bb.2: // %test3
 ; CHECK-GI-NEXT:    asr x10, x10, #12
 ; CHECK-GI-NEXT:    tst x10, x9
diff --git a/llvm/test/CodeGen/AArch64/min-max-combine.ll b/llvm/test/CodeGen/AArch64/min-max-combine.ll
index 5111f838b73aa..558d4b8b499ba 100644
--- a/llvm/test/CodeGen/AArch64/min-max-combine.ll
+++ b/llvm/test/CodeGen/AArch64/min-max-combine.ll
@@ -123,7 +123,7 @@ define i8 @smini8_zero(i8 %a) {
 ; CHECK-GLOBAL:       // %bb.0:
 ; CHECK-GLOBAL-NEXT:    sxtb w8, w0
 ; CHECK-GLOBAL-NEXT:    cmp w8, #0
-; CHECK-GLOBAL-NEXT:    csel w0, w0, wzr, lt
+; CHECK-GLOBAL-NEXT:    csel w0, w0, wzr, mi
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call i8 @llvm.smin.i8(i8 %a, i8 0)
   ret i8 %c
@@ -148,7 +148,7 @@ define i16 @smini16_zero(i16 %a) {
 ; CHECK-GLOBAL:       // %bb.0:
 ; CHECK-GLOBAL-NEXT:    sxth w8, w0
 ; CHECK-GLOBAL-NEXT:    cmp w8, #0
-; CHECK-GLOBAL-NEXT:    csel w0, w0, wzr, lt
+; CHECK-GLOBAL-NEXT:    csel w0, w0, wzr, mi
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call i16 @llvm.smin.i16(i16 %a, i16 0)
   ret i16 %c
@@ -170,7 +170,7 @@ define i32 @smini32_zero(i32 %a) {
 ; CHECK-GLOBAL-LABEL: smini32_zero:
 ; CHECK-GLOBAL:       // %bb.0:
 ; CHECK-GLOBAL-NEXT:    cmp w0, #0
-; CHECK-GLOBAL-NEXT:    csel w0, w0, wzr, lt
+; CHECK-GLOBAL-NEXT:    csel w0, w0, wzr, mi
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call i32 @llvm.smin.i32(i32 %a, i32 0)
   ret i32 %c
@@ -192,7 +192,7 @@ define i64 @smini64_zero(i64 %a) {
 ; CHECK-GLOBAL-LABEL: smini64_zero:
 ; CHECK-GLOBAL:       // %bb.0:
 ; CHECK-GLOBAL-NEXT:    cmp x0, #0
-; CHECK-GLOBAL-NEXT:    csel x0, x0, xzr, lt
+; CHECK-GLOBAL-NEXT:    csel x0, x0, xzr, mi
 ; CHECK-GLOBAL-NEXT:    ret
   %c = call i64 @llvm.smin.i64(i64 %a, i64 0)
   ret i64 %c
diff --git a/llvm/test/CodeGen/AArch64/pr72777.ll b/llvm/test/CodeGen/AArch64/pr72777.ll
index e9021d605f1fe..fa9f82f8c93c2 100644
--- a/llvm/test/CodeGen/AArch64/pr72777.ll
+++ b/llvm/test/CodeGen/AArch64/pr72777.ll
@@ -4,15 +4,14 @@
 define i64 @f(i64 %0, i64 %1) {
 ; CHECK-LABEL: f:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    orr x9, x1, #0x1
-; CHECK-NEXT:    add x10, x0, x0
-; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
-; CHECK-NEXT:    add x9, x9, x10
-; CHECK-NEXT:    lsl x10, x9, #1
-; CHECK-NEXT:    cmp x9, #0
-; CHECK-NEXT:    cinv x8, x8, ge
-; CHECK-NEXT:    cmp x9, x10, asr #1
-; CHECK-NEXT:    csel x0, x8, x10, ne
+; CHECK-NEXT:    orr x8, x1, #0x1
+; CHECK-NEXT:    add x9, x0, x0
+; CHECK-NEXT:    mov x10, #-9223372036854775808 // =0x8000000000000000
+; CHECK-NEXT:    adds x8, x8, x9
+; CHECK-NEXT:    lsl x9, x8, #1
+; CHECK-NEXT:    cinv x10, x10, pl
+; CHECK-NEXT:    cmp x8, x9, asr #1
+; CHECK-NEXT:    csel x0, x10, x9, ne
 ; CHECK-NEXT:    ret
   %3 = or i64 1, %1
   %4 = add i64 %3, %0
diff --git a/llvm/test/CodeGen/AArch64/sdivpow2.ll b/llvm/test/CodeGen/AArch64/sdivpow2.ll
index 2551be8555ce6..bb18ceb3fe69c 100644
--- a/llvm/test/CodeGen/AArch64/sdivpow2.ll
+++ b/llvm/test/CodeGen/AArch64/sdivpow2.ll
@@ -3,86 +3,143 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -fast-isel=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,FAST
 
 define i32 @test1(i32 %x) {
-; CHECK-LABEL: test1:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #7
-; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    csel w8, w8, w0, lt
-; CHECK-NEXT:    asr w0, w8, #3
-; CHECK-NEXT:    ret
+; ISEL-LABEL: test1:
+; ISEL:       // %bb.0:
+; ISEL-NEXT:    add w8, w0, #7
+; ISEL-NEXT:    cmp w0, #0
+; ISEL-NEXT:    csel w8, w8, w0, mi
+; ISEL-NEXT:    asr w0, w8, #3
+; ISEL-NEXT:    ret
+;
+; FAST-LABEL: test1:
+; FAST:       // %bb.0:
+; FAST-NEXT:    add w8, w0, #7
+; FAST-NEXT:    cmp w0, #0
+; FAST-NEXT:    csel w8, w8, w0, lt
+; FAST-NEXT:    asr w0, w8, #3
+; FAST-NEXT:    ret
   %div = sdiv i32 %x, 8
   ret i32 %div
 }
 
 define i32 @test2(i32 %x) {
-; CHECK-LABEL: test2:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #7
-; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    csel w8, w8, w0, lt
-; CHECK-NEXT:    neg w0, w8, asr #3
-; CHECK-NEXT:    ret
+; ISEL-LABEL: test2:
+; ISEL:       // %bb.0:
+; ISEL-NEXT:    add w8, w0, #7
+; ISEL-NEXT:    cmp w0, #0
+; ISEL-NEXT:    csel w8, w8, w0, mi
+; ISEL-NEXT:    neg w0, w8, asr #3
+; ISEL-NEXT:    ret
+;
+; FAST-LABEL: test2:
+; FAST:       // %bb.0:
+; FAST-NEXT:    add w8, w0, #7
+; FAST-NEXT:    cmp w0, #0
+; FAST-NEXT:    csel w8, w8, w0, lt
+; FAST-NEXT:    neg w0, w8, asr #3
+; FAST-NEXT:    ret
   %div = sdiv i32 %x, -8
   ret i32 %div
 }
 
 define i32 @test3(i32 %x) {
-; CHECK-LABEL: test3:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #31
-; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    csel w8, w8, w0, lt
-; CHECK-NEXT:    asr w0, w8, #5
-; CHECK-NEXT:    ret
+; ISEL-LABEL: test3:
+; ISEL:       // %bb.0:
+; ISEL-NEXT:    add w8, w0, #31
+; ISEL-NEXT:    cmp w0, #0
+; ISEL-NEXT:    csel w8, w8, w0, mi
+; ISEL-NEXT:    asr w0, w8, #5
+; ISEL-NEXT:    ret
+;
+; FAST-LABEL: test3:
+; FAST:       // %bb.0:
+; FAST-NEXT:    add w8, w0, #31
+; FAST-NEXT:    cmp w0, #0
+; FAST-NEXT:    csel w8, w8, w0, lt
+; FAST-NEXT:    asr w0, w8, #5
+; FAST-NEXT:    ret
   %div = sdiv i32 %x, 32
   ret i32 %div
 }
 
 define i64 @test4(i64 %x) {
-; CHECK-LABEL: test4:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #7
-; CHECK-NEXT:    cmp x0, #0
-; CHECK-NEXT:    csel x8, x8, x0, lt
-; CHECK-NEXT:    asr x0, x8, #3
-; CHECK-NEXT:    ret
+; ISEL-LABEL: test4:
+; ISEL:       // %bb.0:
+; ISEL-NEXT:    add x8, x0, #7
+; ISEL-NEXT:    cmp x0, #0
+; ISEL-NEXT:    csel x8, x8, x0, mi
+; ISEL-NEXT:    asr x0, x8, #3
+; ISEL-NEXT:    ret
+;
+; FAST-LABEL: test4:
+; FAST:       // %bb.0:
+; FAST-NEXT:    add x8, x0, #7
+; FAST-NEXT:    cmp x0, #0
+; FAST-NEXT:    csel x8, x8, x0, lt
+; FAST-NEXT:    asr x0, x8, #3
+; FAST-NEXT:    ret
   %div = sdiv i64 %x, 8
   ret i64 %div
 }
 
 define i64 @test5(i64 %x) {
-; CHECK-LABEL: test5:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #7
-; CHECK-NEXT:    cmp x0, #0
-; CHECK-NEXT:    csel x8, x8, x0, lt
-; CHECK-NEXT:    neg x0, x8, asr #3
-; CHECK-NEXT:    ret
+; ISEL-LABEL: test5:
+; ISEL:       // %bb.0:
+; ISEL-NEXT:    add x8, x0, #7
+; ISEL-NEXT:    cmp x0, #0
+; ISEL-NEXT:    csel x8, x8, x0, mi
+; ISEL-NEXT:    neg x0, x8, asr #3
+; ISEL-NEXT:    ret
+;
+; FAST-LABEL: test5:
+; FAST:       // %bb.0:
+; FAST-NEXT:    add x8, x0, #7
+; FAST-NEXT:    cmp x0, #0
+; FAST-NEXT:    csel x8, x8, x0, lt
+; FAST-NEXT:    neg x0, x8, asr #3
+; FAST-NEXT:    ret
   %div = sdiv i64 %x, -8
   ret i64 %div
 }
 
 define i64 @test6(i64 %x) {
-; CHECK-LABEL: test6:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    add x8, x0, #63
-; CHECK-NEXT:    cmp x0, #0
-; CHECK-NEXT:    csel x8, x8, x0, lt
-; CHECK-NEXT:    asr x0, x8, #6
-; CHECK-NEXT:    ret
+; ISEL-LABEL: test6:
+; ISEL:       // %bb.0:
+; ISEL-NEXT:    add x8, x0, #63
+; ISEL-NEXT:    cmp x0, #0
+; ISEL-NEXT:    csel x8, x8, x0, mi
+; ISEL-NEXT:    asr x0, x8, #6
+; ISEL-NEXT:    ret
+;
+; FAST-LABEL: test6:
+; FAST:       // %bb.0:
+; FAST-NEXT:    add x8, x0, #63
+; FAST-NEXT:    cmp x0, #0
+; FAST-NEXT:    csel x8, x8, x0, lt
+; FAST-NEXT:    asr x0, x8, #6
+; FAST-NEXT:    ret
   %div = sdiv i64 %x, 64
   ret i64 %div
 }
 
 define i64 @test7(i64 %x) {
-; CHECK-LABEL: test7:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #281474976710655 // =0xffffffffffff
-; CHECK-NEXT:    cmp x0, #0
-; CHECK-NEXT:    add x8, x0, x8
-; CHECK-NEXT:    csel x8, x8, x0, lt
-; CHECK-NEXT:    asr x0, x8, #48
-; CHECK-NEXT:    ret
+; ISEL-LABEL: test7:
+; ISEL:       // %bb.0:
+; ISEL-NEXT:    mov x8, #281474976710655 // =0xffffffffffff
+; ISEL-NEXT:    cmp x0, #0
+; ISEL-NEXT:    add x8, x0, x8
+; ISEL-NEXT:    csel x8, x8, x0, mi
+; ISEL-NEXT:    asr x0, x8, #48
+; ISEL-NEXT:    ret
+;
+; FAST-LABEL: test7:
+; FAST:       // %bb.0:
+; FAST-NEXT:    mov x8, #281474976710655 // =0xffffffffffff
+; FAST-NEXT:    cmp x0, #0
+; FAST-NEXT:    add x8, x0, x8
+; FAST-NEXT:    csel x8, x8, x0, lt
+; FAST-NEXT:    asr x0, x8, #48
+; FAST-NEXT:    ret
   %div = sdiv i64 %x, 281474976710656
   ret i64 %div
 }
@@ -132,3 +189,5 @@ define i32 @sdiv_int(i32 %begin, i32 %first) #0 {
 }
 
 attributes #0 = { "target-features"="+sve" vscale_range(2,2) }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/select-constant-xor.ll b/llvm/test/CodeGen/AArch64/select-constant-xor.ll
index fe9a2c0fad830..97ad579a39f78 100644
--- a/llvm/test/CodeGen/AArch64/select-constant-xor.ll
+++ b/llvm/test/CodeGen/AArch64/select-constant-xor.ll
@@ -26,7 +26,7 @@ define i64 @selecti64i64(i64 %a) {
 ; CHECK-GI-NEXT:    mov x8, #-2147483648 // =0xffffffff80000000
 ; CHECK-GI-NEXT:    mov w9, #2147483647 // =0x7fffffff
 ; CHECK-GI-NEXT:    cmp x0, #0
-; CHECK-GI-NEXT:    csel x0, x9, x8, ge
+; CHECK-GI-NEXT:    csel x0, x9, x8, pl
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i64 %a, -1
   %s = select i1 %c, i64 2147483647, i64 -2147483648
@@ -44,7 +44,7 @@ define i32 @selecti64i32(i64 %a) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    cmp x0, #0
 ; CHECK-GI-NEXT:    mov w9, #-2147483648 // =0x80000000
-; CHECK-GI-NEXT:    cset w8, ge
+; CHECK-GI-NEXT:    cset w8, pl
 ; CHECK-GI-NEXT:    sbfx w8, w8, #0, #1
 ; CHECK-GI-NEXT:    add w0, w8, w9
 ; CHECK-GI-NEXT:    ret
@@ -66,7 +66,7 @@ define i64 @selecti32i64(i32 %a) {
 ; CHECK-GI-NEXT:    mov x8, #-2147483648 // =0xffffffff80000000
 ; CHECK-GI-NEXT:    mov w9, #2147483647 // =0x7fffffff
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel x0, x9, x8, ge
+; CHECK-GI-NEXT:    csel x0, x9, x8, pl
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
   %s = select i1 %c, i64 2147483647, i64 -2147483648
@@ -99,7 +99,7 @@ define i32 @selecti32i32(i32 %a) {
 ; CHECK-GI-NEXT:    mov w8, #-85 // =0xffffffab
 ; CHECK-GI-NEXT:    mov w9, #84 // =0x54
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w9, w8, ge
+; CHECK-GI-NEXT:    csel w0, w9, w8, pl
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
   %s = select i1 %c, i32 84, i32 -85
@@ -118,7 +118,7 @@ define i8 @selecti32i8(i32 %a) {
 ; CHECK-GI-NEXT:    mov w8, #84 // =0x54
 ; CHECK-GI-NEXT:    mov w9, #-85 // =0xffffffab
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w8, w9, ge
+; CHECK-GI-NEXT:    csel w0, w8, w9, pl
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
   %s = select i1 %c, i8 84, i8 -85
@@ -139,7 +139,7 @@ define i32 @selecti8i32(i8 %a) {
 ; CHECK-GI-NEXT:    mov w9, #-85 // =0xffffffab
 ; CHECK-GI-NEXT:    mov w10, #84 // =0x54
 ; CHECK-GI-NEXT:    cmp w8, #0
-; CHECK-GI-NEXT:    csel w0, w10, w9, ge
+; CHECK-GI-NEXT:    csel w0, w10, w9, pl
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i8 %a, -1
   %s = select i1 %c, i32 84, i32 -85
@@ -150,7 +150,7 @@ define i32 @icmpasreq(i32 %input, i32 %a, i32 %b) {
 ; CHECK-SD-LABEL: icmpasreq:
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    csel w0, w1, w2, lt
+; CHECK-SD-NEXT:    csel w0, w1, w2, mi
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: icmpasreq:
@@ -193,7 +193,7 @@ define i32 @selecti32i32_0(i32 %a) {
 ; CHECK-GI-LABEL: selecti32i32_0:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w8, lt
+; CHECK-GI-NEXT:    cset w8, mi
 ; CHECK-GI-NEXT:    sbfx w0, w8, #0, #1
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
@@ -211,7 +211,7 @@ define i32 @selecti32i32_m1(i32 %a) {
 ; CHECK-GI-LABEL: selecti32i32_m1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w8, ge
+; CHECK-GI-NEXT:    cset w8, pl
 ; CHECK-GI-NEXT:    sbfx w0, w8, #0, #1
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
@@ -230,7 +230,7 @@ define i32 @selecti32i32_1(i32 %a) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #-2 // =0xfffffffe
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csinc w0, w8, wzr, lt
+; CHECK-GI-NEXT:    csinc w0, w8, wzr, mi
 ; CHECK-GI-NEXT:    ret
   %c = icmp sgt i32 %a, -1
   %s = select i1 %c, i32 1, i32 -2
@@ -249,7 +249,7 @@ define i32 @selecti32i32_sge(i32 %a) {
 ; CHECK-GI-NEXT:    mov w8, #-13 // =0xfffffff3
 ; CHECK-GI-NEXT:    mov w9, #12 // =0xc
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w9, w8, ge
+; CHECK-GI-NEXT:    csel w0, w9, w8, pl
 ; CHECK-GI-NEXT:    ret
   %c = icmp sge i32 %a, 0
   %s = select i1 %c, i32 12, i32 -13
@@ -268,7 +268,7 @@ define i32 @selecti32i32_slt(i32 %a) {
 ; CHECK-GI-NEXT:    mov w8, #12 // =0xc
 ; CHECK-GI-NEXT:    mov w9, #-13 // =0xfffffff3
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w9, w8, lt
+; CHECK-GI-NEXT:    csel w0, w9, w8, mi
 ; CHECK-GI-NEXT:    ret
   %c = icmp slt i32 %a, 0
   %s = select i1 %c, i32 -13, i32 12
@@ -287,7 +287,7 @@ define i32 @selecti32i32_sle(i32 %a) {
 ; CHECK-GI-NEXT:    mov w8, #12 // =0xc
 ; CHECK-GI-NEXT:    mov w9, #-13 // =0xfffffff3
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w9, w8, lt
+; CHECK-GI-NEXT:    csel w0, w9, w8, mi
 ; CHECK-GI-NEXT:    ret
   %c = icmp sle i32 %a, -1
   %s = select i1 %c, i32 -13, i32 12
@@ -306,7 +306,7 @@ define i32 @selecti32i32_sgt(i32 %a) {
 ; CHECK-GI-NEXT:    mov w8, #12 // =0xc
 ; CHECK-GI-NEXT:    mov w9, #-13 // =0xfffffff3
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w9, w8, lt
+; CHECK-GI-NEXT:    csel w0, w9, w8, mi
 ; CHECK-GI-NEXT:    ret
   %c = icmp sle i32 %a, -1
   %s = select i1 %c, i32 -13, i32 12
@@ -318,7 +318,7 @@ define i32 @oneusecmp(i32 %a, i32 %b, i32 %d) {
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    asr w8, w0, #31
 ; CHECK-SD-NEXT:    cmp w0, #0
-; CHECK-SD-NEXT:    csel w9, w2, w1, lt
+; CHECK-SD-NEXT:    csel w9, w2, w1, mi
 ; CHECK-SD-NEXT:    eor w8, w8, #0x7f
 ; CHECK-SD-NEXT:    add w0, w8, w9
 ; CHECK-SD-NEXT:    ret
@@ -328,8 +328,8 @@ define i32 @oneusecmp(i32 %a, i32 %b, i32 %d) {
 ; CHECK-GI-NEXT:    mov w8, #127 // =0x7f
 ; CHECK-GI-NEXT:    mov w9, #-128 // =0xffffff80
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w8, w9, w8, lt
-; CHECK-GI-NEXT:    csel w9, w2, w1, lt
+; CHECK-GI-NEXT:    csel w8, w9, w8, mi
+; CHECK-GI-NEXT:    csel w9, w2, w1, mi
 ; CHECK-GI-NEXT:    add w0, w8, w9
 ; CHECK-GI-NEXT:    ret
   %c = icmp sle i32 %a, -1
diff --git a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
index 0d4a636446164..293b74ecd9d3a 100644
--- a/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
+++ b/llvm/test/CodeGen/AArch64/selectcc-to-shiftand.ll
@@ -15,7 +15,7 @@ define i32 @neg_sel_constants(i32 %a) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #5 // =0x5
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w8, wzr, lt
+; CHECK-GI-NEXT:    csel w0, w8, wzr, mi
 ; CHECK-GI-NEXT:    ret
   %tmp.1 = icmp slt i32 %a, 0
   %retval = select i1 %tmp.1, i32 5, i32 0
@@ -34,7 +34,7 @@ define i32 @neg_sel_special_constant(i32 %a) {
 ; CHECK-GI-LABEL: neg_sel_special_constant:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w8, lt
+; CHECK-GI-NEXT:    cset w8, mi
 ; CHECK-GI-NEXT:    lsl w0, w8, #9
 ; CHECK-GI-NEXT:    ret
   %tmp.1 = icmp slt i32 %a, 0
@@ -53,7 +53,7 @@ define i32 @neg_sel_variable_and_zero(i32 %a, i32 %b) {
 ; CHECK-GI-LABEL: neg_sel_variable_and_zero:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w1, wzr, lt
+; CHECK-GI-NEXT:    csel w0, w1, wzr, mi
 ; CHECK-GI-NEXT:    ret
   %tmp.1 = icmp slt i32 %a, 0
   %retval = select i1 %tmp.1, i32 %b, i32 0
@@ -93,7 +93,7 @@ define i32 @pos_sel_constants(i32 %a) {
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    mov w8, #5 // =0x5
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w8, wzr, ge
+; CHECK-GI-NEXT:    csel w0, w8, wzr, pl
 ; CHECK-GI-NEXT:    ret
   %tmp.1 = icmp sgt i32 %a, -1
   %retval = select i1 %tmp.1, i32 5, i32 0
@@ -112,7 +112,7 @@ define i32 @pos_sel_special_constant(i32 %a) {
 ; CHECK-GI-LABEL: pos_sel_special_constant:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    cset w8, ge
+; CHECK-GI-NEXT:    cset w8, pl
 ; CHECK-GI-NEXT:    lsl w0, w8, #9
 ; CHECK-GI-NEXT:    ret
   %tmp.1 = icmp sgt i32 %a, -1
@@ -131,7 +131,7 @@ define i32 @pos_sel_variable_and_zero(i32 %a, i32 %b) {
 ; CHECK-GI-LABEL: pos_sel_variable_and_zero:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    cmp w0, #0
-; CHECK-GI-NEXT:    csel w0, w1, wzr, ge
+; CHECK-GI-NEXT:    csel w0, w1, wzr, pl
 ; CHECK-GI-NEXT:    ret
   %tmp.1 = icmp sgt i32 %a, -1
   %retval = select i1 %tmp.1, i32 %b, i32 0
diff --git a/llvm/test/CodeGen/AArch64/signbit-shift.ll b/llvm/test/CodeGen/AArch64/signbit-shift.ll
index 0e6da326a31f4..ce8a96386d04c 100644
--- a/llvm/test/CodeGen/AArch64/signbit-shift.ll
+++ b/llvm/test/CodeGen/AArch64/signbit-shift.ll
@@ -128,7 +128,7 @@ define i32 @sel_ifneg_tval_bigger(i32 %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #41 // =0x29
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cinc w0, w8, lt
+; CHECK-NEXT:    cinc w0, w8, mi
 ; CHECK-NEXT:    ret
   %c = icmp slt i32 %x, 0
   %r = select i1 %c, i32 42, i32 41
@@ -162,7 +162,7 @@ define i32 @sel_ifneg_fval_bigger(i32 %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #41 // =0x29
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cinc w0, w8, ge
+; CHECK-NEXT:    cinc w0, w8, pl
 ; CHECK-NEXT:    ret
   %c = icmp slt i32 %x, 0
   %r = select i1 %c, i32 41, i32 42
diff --git a/llvm/test/CodeGen/AArch64/smul_fix_sat.ll b/llvm/test/CodeGen/AArch64/smul_fix_sat.ll
index c2d8d34b9305a..7cb680b8449cf 100644
--- a/llvm/test/CodeGen/AArch64/smul_fix_sat.ll
+++ b/llvm/test/CodeGen/AArch64/smul_fix_sat.ll
@@ -63,7 +63,7 @@ define i32 @func4(i32 %x, i32 %y) nounwind {
 ; CHECK-NEXT:    eor w10, w0, w1
 ; CHECK-NEXT:    mov w8, #-2147483648 // =0x80000000
 ; CHECK-NEXT:    cmp w10, #0
-; CHECK-NEXT:    cinv w8, w8, ge
+; CHECK-NEXT:    cinv w8, w8, pl
 ; CHECK-NEXT:    cmp x9, w9, sxtw
 ; CHECK-NEXT:    csel w0, w8, w9, ne
 ; CHECK-NEXT:    ret
@@ -79,7 +79,7 @@ define i64 @func5(i64 %x, i64 %y) {
 ; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
 ; CHECK-NEXT:    cmp x11, #0
 ; CHECK-NEXT:    smulh x10, x0, x1
-; CHECK-NEXT:    cinv x8, x8, ge
+; CHECK-NEXT:    cinv x8, x8, pl
 ; CHECK-NEXT:    cmp x10, x9, asr #63
 ; CHECK-NEXT:    csel x0, x8, x9, ne
 ; CHECK-NEXT:    ret
@@ -96,7 +96,7 @@ define i4 @func6(i4 %x, i4 %y) nounwind {
 ; CHECK-NEXT:    smull x11, w10, w9
 ; CHECK-NEXT:    eor w9, w10, w9
 ; CHECK-NEXT:    cmp w9, #0
-; CHECK-NEXT:    cinv w8, w8, ge
+; CHECK-NEXT:    cinv w8, w8, pl
 ; CHECK-NEXT:    cmp x11, w11, sxtw
 ; CHECK-NEXT:    csel w8, w8, w11, ne
 ; CHECK-NEXT:    asr w0, w8, #28
@@ -158,11 +158,11 @@ define <2 x i32> @vec(<2 x i32> %x, <2 x i32> %y) nounwind {
 ; CHECK-NEXT:    cmp w9, #0
 ; CHECK-NEXT:    smull x9, w12, w10
 ; CHECK-NEXT:    eor w10, w12, w10
-; CHECK-NEXT:    cinv w12, w8, ge
+; CHECK-NEXT:    cinv w12, w8, pl
 ; CHECK-NEXT:    cmp x11, w11, sxtw
 ; CHECK-NEXT:    csel w11, w12, w11, ne
 ; CHECK-NEXT:    cmp w10, #0
-; CHECK-NEXT:    cinv w8, w8, ge
+; CHECK-NEXT:    cinv w8, w8, pl
 ; CHECK-NEXT:    cmp x9, w9, sxtw
 ; CHECK-NEXT:    csel w8, w8, w9, ne
 ; CHECK-NEXT:    fmov s0, w8
@@ -188,12 +188,12 @@ define <4 x i32> @vec2(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; CHECK-NEXT:    cmp w11, #0
 ; CHECK-NEXT:    smull x11, w13, w12
 ; CHECK-NEXT:    eor w12, w13, w12
-; CHECK-NEXT:    cinv w13, w8, ge
+; CHECK-NEXT:    cinv w13, w8, pl
 ; CHECK-NEXT:    cmp x9, w9, sxtw
 ; CHECK-NEXT:    csel w9, w13, w9, ne
 ; CHECK-NEXT:    cmp w12, #0
 ; CHECK-NEXT:    mov w13, v1.s[3]
-; CHECK-NEXT:    cinv w12, w8, ge
+; CHECK-NEXT:    cinv w12, w8, pl
 ; CHECK-NEXT:    cmp x11, w11, sxtw
 ; CHECK-NEXT:    csel w11, w12, w11, ne
 ; CHECK-NEXT:    mov w12, v0.s[3]
@@ -203,13 +203,13 @@ define <4 x i32> @vec2(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; CHECK-NEXT:    eor w9, w14, w10
 ; CHECK-NEXT:    smull x10, w12, w13
 ; CHECK-NEXT:    cmp w9, #0
-; CHECK-NEXT:    cinv w9, w8, ge
+; CHECK-NEXT:    cinv w9, w8, pl
 ; CHECK-NEXT:    cmp x11, w11, sxtw
 ; CHECK-NEXT:    csel w9, w9, w11, ne
 ; CHECK-NEXT:    mov v0.s[2], w9
 ; CHECK-NEXT:    eor w9, w12, w13
 ; CHECK-NEXT:    cmp w9, #0
-; CHECK-NEXT:    cinv w8, w8, ge
+; CHECK-NEXT:    cinv w8, w8, pl
 ; CHECK-NEXT:    cmp x10, w10, sxtw
 ; CHECK-NEXT:    csel w8, w8, w10, ne
 ; CHECK-NEXT:    mov v0.s[3], w8
diff --git a/llvm/test/CodeGen/AArch64/srem-pow2.ll b/llvm/test/CodeGen/AArch64/srem-pow2.ll
index 4c114d185997e..a0124b9ab4a5e 100644
--- a/llvm/test/CodeGen/AArch64/srem-pow2.ll
+++ b/llvm/test/CodeGen/AArch64/srem-pow2.ll
@@ -45,7 +45,7 @@ define i32 @fold_srem_2_i64(i32 %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and w8, w0, #0x1
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    cneg w0, w8, lt
+; CHECK-NEXT:    cneg w0, w8, mi
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, 2
   ret i32 %1
@@ -56,7 +56,7 @@ define i64 @fold_srem_2_i32(i64 %x) {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    and x8, x0, #0x1
 ; CHECK-NEXT:    cmp x0, #0
-; CHECK-NEXT:    cneg x0, x8, lt
+; CHECK-NEXT:    cneg x0, x8, mi
 ; CHECK-NEXT:    ret
   %1 = srem i64 %x, 2
   ret i64 %1
diff --git a/llvm/test/CodeGen/AArch64/sshl_sat.ll b/llvm/test/CodeGen/AArch64/sshl_sat.ll
index fbcd2db1298f0..be2b3e763733b 100644
--- a/llvm/test/CodeGen/AArch64/sshl_sat.ll
+++ b/llvm/test/CodeGen/AArch64/sshl_sat.ll
@@ -146,7 +146,7 @@ define i16 @combine_shlsat_to_shl_no_fold(i16 %x) nounwind {
 ; CHECK-NEXT:    mov w9, #-65536 // =0xffff0000
 ; CHECK-NEXT:    mov w10, #-2147483648 // =0x80000000
 ; CHECK-NEXT:    ands w8, w9, w8, lsl #14
-; CHECK-NEXT:    cinv w10, w10, ge
+; CHECK-NEXT:    cinv w10, w10, pl
 ; CHECK-NEXT:    lsl w9, w8, #3
 ; CHECK-NEXT:    cmp w8, w9, asr #3
 ; CHECK-NEXT:    csel w8, w10, w9, ne
diff --git a/llvm/test/CodeGen/AArch64/stack-hazard.ll b/llvm/test/CodeGen/AArch64/stack-hazard.ll
index 3a33405200132..a85ee22200398 100644
--- a/llvm/test/CodeGen/AArch64/stack-hazard.ll
+++ b/llvm/test/CodeGen/AArch64/stack-hazard.ll
@@ -1612,7 +1612,7 @@ define i32 @f128_libcall(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3, i32 %a, i32
 ; CHECK0-NEXT:  .LBB27_4:
 ; CHECK0-NEXT:    cmp w0, #0
 ; CHECK0-NEXT:    .cfi_restore vg
-; CHECK0-NEXT:    cset w21, lt
+; CHECK0-NEXT:    cset w21, mi
 ; CHECK0-NEXT:    bl __arm_sme_state
 ; CHECK0-NEXT:    and x22, x0, #0x1
 ; CHECK0-NEXT:    .cfi_offset vg, -40
@@ -1627,7 +1627,7 @@ define i32 @f128_libcall(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3, i32 %a, i32
 ; CHECK0-NEXT:    smstart sm
 ; CHECK0-NEXT:  .LBB27_8:
 ; CHECK0-NEXT:    cmp w0, #0
-; CHECK0-NEXT:    cset w8, ge
+; CHECK0-NEXT:    cset w8, pl
 ; CHECK0-NEXT:    tst w8, w21
 ; CHECK0-NEXT:    csel w0, w20, w19, ne
 ; CHECK0-NEXT:    .cfi_restore vg
@@ -1701,7 +1701,7 @@ define i32 @f128_libcall(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3, i32 %a, i32
 ; CHECK64-NEXT:  .LBB27_4:
 ; CHECK64-NEXT:    cmp w0, #0
 ; CHECK64-NEXT:    .cfi_restore vg
-; CHECK64-NEXT:    cset w21, lt
+; CHECK64-NEXT:    cset w21, mi
 ; CHECK64-NEXT:    bl __arm_sme_state
 ; CHECK64-NEXT:    and x22, x0, #0x1
 ; CHECK64-NEXT:    .cfi_offset vg, -48
@@ -1716,7 +1716,7 @@ define i32 @f128_libcall(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3, i32 %a, i32
 ; CHECK64-NEXT:    smstart sm
 ; CHECK64-NEXT:  .LBB27_8:
 ; CHECK64-NEXT:    cmp w0, #0
-; CHECK64-NEXT:    cset w8, ge
+; CHECK64-NEXT:    cset w8, pl
 ; CHECK64-NEXT:    tst w8, w21
 ; CHECK64-NEXT:    csel w0, w20, w19, ne
 ; CHECK64-NEXT:    .cfi_restore vg
@@ -1799,7 +1799,7 @@ define i32 @f128_libcall(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3, i32 %a, i32
 ; CHECK1024-NEXT:  .LBB27_4:
 ; CHECK1024-NEXT:    cmp w0, #0
 ; CHECK1024-NEXT:    .cfi_restore vg
-; CHECK1024-NEXT:    cset w21, lt
+; CHECK1024-NEXT:    cset w21, mi
 ; CHECK1024-NEXT:    bl __arm_sme_state
 ; CHECK1024-NEXT:    and x22, x0, #0x1
 ; CHECK1024-NEXT:    .cfi_offset vg, -48
@@ -1815,7 +1815,7 @@ define i32 @f128_libcall(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3, i32 %a, i32
 ; CHECK1024-NEXT:    smstart sm
 ; CHECK1024-NEXT:  .LBB27_8:
 ; CHECK1024-NEXT:    cmp w0, #0
-; CHECK1024-NEXT:    cset w8, ge
+; CHECK1024-NEXT:    cset w8, pl
 ; CHECK1024-NEXT:    tst w8, w21
 ; CHECK1024-NEXT:    csel w0, w20, w19, ne
 ; CHECK1024-NEXT:    .cfi_restore vg
diff --git a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
index 3fe7346b3db28..4a04934971711 100644
--- a/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
+++ b/llvm/test/CodeGen/AArch64/tbz-tbnz.ll
@@ -200,18 +200,18 @@ define void @test8(i64 %val1, i64 %val2, i64 %val3) {
 ; CHECK-SD-LABEL: test8:
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    tst x0, x1
-; CHECK-SD-NEXT:    b.ge .LBB7_3
+; CHECK-SD-NEXT:    b.pl .LBB7_3
 ; CHECK-SD-NEXT:  // %bb.1:
 ; CHECK-SD-NEXT:    and x8, x1, x2
 ; CHECK-SD-NEXT:    tbnz x8, #63, .LBB7_3
 ; CHECK-SD-NEXT:  // %bb.2: // %if.then2
 ; CHECK-SD-NEXT:    tst x0, x1, lsl #63
-; CHECK-SD-NEXT:    b.lt .LBB7_4
+; CHECK-SD-NEXT:    b.mi .LBB7_4
 ; CHECK-SD-NEXT:  .LBB7_3: // %if.end
 ; CHECK-SD-NEXT:    ret
 ; CHECK-SD-NEXT:  .LBB7_4: // %if.then3
 ; CHECK-SD-NEXT:    tst x0, x1, lsl #62
-; CHECK-SD-NEXT:    b.lt .LBB7_3
+; CHECK-SD-NEXT:    b.mi .LBB7_3
 ; CHECK-SD-NEXT:  // %bb.5: // %if.then4
 ; CHECK-SD-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-SD-NEXT:    .cfi_def_cfa_offset 16
@@ -223,18 +223,18 @@ define void @test8(i64 %val1, i64 %val2, i64 %val3) {
 ; CHECK-GI-LABEL: test8:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    tst x0, x1
-; CHECK-GI-NEXT:    b.ge .LBB7_3
+; CHECK-GI-NEXT:    b.pl .LBB7_3
 ; CHECK-GI-NEXT:  // %bb.1:
 ; CHECK-GI-NEXT:    tst x1, x2
-; CHECK-GI-NEXT:    b.lt .LBB7_3
+; CHECK-GI-NEXT:    b.mi .LBB7_3
 ; CHECK-GI-NEXT:  // %bb.2: // %if.then2
 ; CHECK-GI-NEXT:    tst x0, x1, lsl #63
-; CHECK-GI-NEXT:    b.lt .LBB7_4
+; CHECK-GI-NEXT:    b.mi .LBB7_4
 ; CHECK-GI-NEXT:  .LBB7_3: // %if.end
 ; CHECK-GI-NEXT:    ret
 ; CHECK-GI-NEXT:  .LBB7_4: // %if.then3
 ; CHECK-GI-NEXT:    tst x0, x1, lsl #62
-; CHECK-GI-NEXT:    b.lt .LBB7_3
+; CHECK-GI-NEXT:    b.mi .LBB7_3
 ; CHECK-GI-NEXT:  // %bb.5: // %if.then4
 ; CHECK-GI-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-GI-NEXT:    .cfi_def_cfa_offset 16
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
index 1bdf7bbb7f813..62d41fca10db3 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
@@ -28,7 +28,7 @@ define i32 @reduce_and_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    smov w8, v0.b[0]
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i8> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x)
@@ -122,7 +122,7 @@ define i32 @reduce_and_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    smov w8, v0.h[0]
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i16> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x)
@@ -200,7 +200,7 @@ define i32 @reduce_and_v1i32(<1 x i32> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i32> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x)
@@ -261,7 +261,7 @@ define i32 @reduce_and_v1i64(<1 x i64> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    fmov x8, d0
 ; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i64> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x)
@@ -307,7 +307,7 @@ define i32 @reduce_or_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    smov w8, v0.b[0]
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i8> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x)
@@ -401,7 +401,7 @@ define i32 @reduce_or_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    smov w8, v0.h[0]
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i16> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x)
@@ -479,7 +479,7 @@ define i32 @reduce_or_v1i32(<1 x i32> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i32> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x)
@@ -540,7 +540,7 @@ define i32 @reduce_or_v1i64(<1 x i64> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    fmov x8, d0
 ; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i64> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x)
@@ -586,7 +586,7 @@ define i32 @reduce_xor_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    smov w8, v0.b[0]
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i8> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x)
@@ -681,7 +681,7 @@ define i32 @reduce_xor_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    smov w8, v0.h[0]
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i16> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x)
@@ -759,7 +759,7 @@ define i32 @reduce_xor_v1i32(<1 x i32> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    cmp w8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i32> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x)
@@ -820,7 +820,7 @@ define i32 @reduce_xor_v1i64(<1 x i64> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-NEXT:    fmov x8, d0
 ; CHECK-NEXT:    cmp x8, #0
-; CHECK-NEXT:    csel w0, w0, w1, lt
+; CHECK-NEXT:    csel w0, w0, w1, mi
 ; CHECK-NEXT:    ret
   %x = icmp slt <1 x i64> %a0, zeroinitializer
   %y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x)
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll
index 300081dc3ec40..79a8fc35e833d 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmin-legalization.ll
@@ -589,7 +589,7 @@ define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
 ; CHECK-NEXT:    bl __lttf2
 ; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
 ; CHECK-NEXT:    cmp w0, #0
-; CHECK-NEXT:    b.ge .LBB18_2
+; CHECK-NEXT:    b.pl .LBB18_2
 ; CHECK-NEXT:  // %bb.1:
 ; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
 ; CHECK-NEXT:  .LBB18_2:



More information about the llvm-commits mailing list