[llvm] 02769f2 - AArch64/GlobalISel: Stop using legal s1 values

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 8 08:55:19 PDT 2022


Author: Matt Arsenault
Date: 2022-07-08T11:55:08-04:00
New Revision: 02769f2b3fdebb5066d7a973b171d2873a804560

URL: https://github.com/llvm/llvm-project/commit/02769f2b3fdebb5066d7a973b171d2873a804560
DIFF: https://github.com/llvm/llvm-project/commit/02769f2b3fdebb5066d7a973b171d2873a804560.diff

LOG: AArch64/GlobalISel: Stop using legal s1 values

As far as I can tell treating s1 values as legal makes no sense. There
are no allocatable 1-bit registers. SelectionDAG legalizes the usual
set of boolean operations to 32-bits, and this should do the
same. This avoids some special case handling in the selector of s1
values, and some extra code to look through truncates.

This makes some code worse at -O0, since nothing cleans up the `and 1`
that the artifact combiner inserts. We could probably add some
non-essential combines or teach the artifact combiner to elide
intermediates between boolean uses and defs.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
    llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
    llvm/test/CodeGen/AArch64/GlobalISel/fold-brcond-fcmp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/fold-fp-select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/huge-switch.ll
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-intrinsic-min-max.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi-insertpt-decrement.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-sadde.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-saddo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-saddsat.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssube.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssubo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssubsat.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-uadd-sat.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-uadde.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-uaddo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-usub-sat.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-usube.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-usubo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/legalizer-combiner-zext-trunc-crash.mir
    llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
    llvm/test/CodeGen/AArch64/GlobalISel/opt-and-tbnz-tbz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-xor-tbz-tbnz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
    llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-assert-sext.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-assert-zext.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-fp-use-def.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbank-select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-br.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-binop.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-phi.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-redundant-zext.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-saddo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-ssubo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-static.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-truncstore-atomic.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-uaddo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-usubo.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select.mir
    llvm/test/CodeGen/AArch64/GlobalISel/speculative-hardening-brcond.mir
    llvm/test/CodeGen/AArch64/GlobalISel/subreg-copy.mir
    llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir
    llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir
    llvm/test/CodeGen/AArch64/GlobalISel/widen-narrow-tbz-tbnz.mir
    llvm/test/CodeGen/AArch64/arm64-xaluo.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 9a65687735fe..eb8d0552173d 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -1710,11 +1710,6 @@ bool AArch64InstructionSelector::selectCompareBranch(
     MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) {
   Register CondReg = I.getOperand(0).getReg();
   MachineInstr *CCMI = MRI.getVRegDef(CondReg);
-  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC) {
-    CondReg = CCMI->getOperand(1).getReg();
-    CCMI = MRI.getVRegDef(CondReg);
-  }
-
   // Try to select the G_BRCOND using whatever is feeding the condition if
   // possible.
   unsigned CCMIOpc = CCMI->getOpcode();
@@ -3346,12 +3341,6 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {
 
   case TargetOpcode::G_SELECT: {
     auto &Sel = cast<GSelect>(I);
-    if (MRI.getType(Sel.getCondReg()) != LLT::scalar(1)) {
-      LLVM_DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
-                        << ", expected: " << LLT::scalar(1) << '\n');
-      return false;
-    }
-
     const Register CondReg = Sel.getCondReg();
     const Register TReg = Sel.getTrueReg();
     const Register FReg = Sel.getFalseReg();
@@ -4777,12 +4766,6 @@ static bool canEmitConjunction(Register Val, bool &CanNegate, bool &MustBeFirst,
     return false;
   MachineInstr *ValDef = MRI.getVRegDef(Val);
   unsigned Opcode = ValDef->getOpcode();
-  if (Opcode == TargetOpcode::G_TRUNC) {
-    // Look through a trunc.
-    Val = ValDef->getOperand(1).getReg();
-    ValDef = MRI.getVRegDef(Val);
-    Opcode = ValDef->getOpcode();
-  }
   if (isa<GAnyCmp>(ValDef)) {
     CanNegate = true;
     MustBeFirst = false;
@@ -4870,12 +4853,6 @@ MachineInstr *AArch64InstructionSelector::emitConjunctionRec(
   auto &MRI = *MIB.getMRI();
   MachineInstr *ValDef = MRI.getVRegDef(Val);
   unsigned Opcode = ValDef->getOpcode();
-  if (Opcode == TargetOpcode::G_TRUNC) {
-    // Look through a trunc.
-    Val = ValDef->getOperand(1).getReg();
-    ValDef = MRI.getVRegDef(Val);
-    Opcode = ValDef->getOpcode();
-  }
   if (auto *Cmp = dyn_cast<GAnyCmp>(ValDef)) {
     Register LHS = Cmp->getLHSReg();
     Register RHS = Cmp->getRHSReg();
@@ -5026,31 +5003,17 @@ bool AArch64InstructionSelector::tryOptSelect(GSelect &I) {
 
   // First, check if the condition is defined by a compare.
   MachineInstr *CondDef = MRI.getVRegDef(I.getOperand(1).getReg());
-  while (CondDef) {
-    // We can only fold if all of the defs have one use.
-    Register CondDefReg = CondDef->getOperand(0).getReg();
-    if (!MRI.hasOneNonDBGUse(CondDefReg)) {
-      // Unless it's another select.
-      for (const MachineInstr &UI : MRI.use_nodbg_instructions(CondDefReg)) {
-        if (CondDef == &UI)
-          continue;
-        if (UI.getOpcode() != TargetOpcode::G_SELECT)
-          return false;
-      }
-    }
-
-    // We can skip over G_TRUNC since the condition is 1-bit.
-    // Truncating/extending can have no impact on the value.
-    unsigned Opc = CondDef->getOpcode();
-    if (Opc != TargetOpcode::COPY && Opc != TargetOpcode::G_TRUNC)
-      break;
-
-    // Can't see past copies from physregs.
-    if (Opc == TargetOpcode::COPY &&
-        Register::isPhysicalRegister(CondDef->getOperand(1).getReg()))
-      return false;
 
-    CondDef = MRI.getVRegDef(CondDef->getOperand(1).getReg());
+  // We can only fold if all of the defs have one use.
+  Register CondDefReg = CondDef->getOperand(0).getReg();
+  if (!MRI.hasOneNonDBGUse(CondDefReg)) {
+    // Unless it's another select.
+    for (const MachineInstr &UI : MRI.use_nodbg_instructions(CondDefReg)) {
+      if (CondDef == &UI)
+        continue;
+      if (UI.getOpcode() != TargetOpcode::G_SELECT)
+        return false;
+    }
   }
 
   // Is the condition defined by a compare?

diff  --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 10f85103e468..d3617b87a851 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -42,7 +42,6 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
     : ST(&ST) {
   using namespace TargetOpcode;
   const LLT p0 = LLT::pointer(0, 64);
-  const LLT s1 = LLT::scalar(1);
   const LLT s8 = LLT::scalar(8);
   const LLT s16 = LLT::scalar(16);
   const LLT s32 = LLT::scalar(32);
@@ -80,7 +79,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
   const LLT &MinFPScalar = HasFP16 ? s16 : s32;
 
   getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE})
-      .legalFor({p0, s1, s8, s16, s32, s64})
+      .legalFor({p0, s8, s16, s32, s64})
       .legalFor(PackedVectorAllTypeList)
       .widenScalarToNextPow2(0)
       .clampScalar(0, s8, s64)
@@ -198,8 +197,9 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
 
   getActionDefinitionsBuilder(
       {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})
-      .legalFor({{s32, s1}, {s64, s1}})
+      .legalFor({{s32, s32}, {s64, s32}})
       .clampScalar(0, s32, s64)
+       .clampScalar(1, s32, s64)
       .widenScalarToNextPow2(0);
 
   getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FNEG})
@@ -241,7 +241,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
 
   getActionDefinitionsBuilder(G_INSERT)
       .legalIf(all(typeInSet(0, {s32, s64, p0}),
-                   typeInSet(1, {s1, s8, s16, s32}), smallerThan(1, 0)))
+                   typeInSet(1, {s8, s16, s32}), smallerThan(1, 0)))
       .widenScalarToNextPow2(0)
       .clampScalar(0, s32, s64)
       .widenScalarToNextPow2(1)
@@ -433,10 +433,6 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
 
     const LLT &SrcTy = Query.Types[1];
 
-    // Special case for s1.
-    if (SrcTy == s1)
-      return true;
-
     // Make sure we fit in a register otherwise. Don't bother checking that
     // the source type is below 128 bits. We shouldn't be allowing anything
     // through which is wider than the destination in the first place.
@@ -489,13 +485,16 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       .widenScalarToNextPow2(0);
 
   // Control-flow
-  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s8, s16, s32});
+  getActionDefinitionsBuilder(G_BRCOND)
+    .legalFor({s32})
+    .clampScalar(0, s32, s32);
   getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});
 
   getActionDefinitionsBuilder(G_SELECT)
-      .legalFor({{s32, s1}, {s64, s1}, {p0, s1}})
+      .legalFor({{s32, s32}, {s64, s32}, {p0, s32}})
       .widenScalarToNextPow2(0)
       .clampScalar(0, s32, s64)
+      .clampScalar(1, s32, s32)
       .minScalarEltSameAsIf(all(isVector(0), isVector(1)), 1, 0)
       .lowerIf(isVector(0));
 
@@ -508,7 +507,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
     getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});
 
   getActionDefinitionsBuilder(G_PTRTOINT)
-      .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
+      .legalForCartesianProduct({s8, s16, s32, s64}, {p0})
       .legalFor({{v2s64, v2p0}})
       .maxScalar(0, s64)
       .widenScalarToNextPow2(0, /*Min*/ 8);
@@ -525,7 +524,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       // FIXME: This is wrong since G_BITCAST is not allowed to change the
       // number of bits but it's what the previous code described and fixing
       // it breaks tests.
-      .legalForCartesianProduct({s1, s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
+      .legalForCartesianProduct({s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
                                  v8s16, v4s16, v2s16, v4s32, v2s32, v2s64,
                                  v2p0});
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
index c08f590cf28b..270d9daecc9a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic.ll
@@ -2749,7 +2749,10 @@ define i8 @atomicrmw_min_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    ldr w8, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-NOLSE-O0-NEXT:    sxtb w9, w10
 ; CHECK-NOLSE-O0-NEXT:    subs w9, w9, w8, sxtb
-; CHECK-NOLSE-O0-NEXT:    csel w12, w10, w8, le
+; CHECK-NOLSE-O0-NEXT:    cset w9, le
+; CHECK-NOLSE-O0-NEXT:    and w9, w9, #0x1
+; CHECK-NOLSE-O0-NEXT:    ands w9, w9, #0x1
+; CHECK-NOLSE-O0-NEXT:    csel w12, w10, w8, ne
 ; CHECK-NOLSE-O0-NEXT:  LBB33_2: ; %atomicrmw.start
 ; CHECK-NOLSE-O0-NEXT:    ; Parent Loop BB33_1 Depth=1
 ; CHECK-NOLSE-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -2815,7 +2818,10 @@ define i8 @atomicrmw_min_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LDAPR-O0-NEXT:    ldr w8, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-LDAPR-O0-NEXT:    sxtb w9, w10
 ; CHECK-LDAPR-O0-NEXT:    subs w9, w9, w8, sxtb
-; CHECK-LDAPR-O0-NEXT:    csel w12, w10, w8, le
+; CHECK-LDAPR-O0-NEXT:    cset w9, le
+; CHECK-LDAPR-O0-NEXT:    and w9, w9, #0x1
+; CHECK-LDAPR-O0-NEXT:    ands w9, w9, #0x1
+; CHECK-LDAPR-O0-NEXT:    csel w12, w10, w8, ne
 ; CHECK-LDAPR-O0-NEXT:  LBB33_2: ; %atomicrmw.start
 ; CHECK-LDAPR-O0-NEXT:    ; Parent Loop BB33_1 Depth=1
 ; CHECK-LDAPR-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -2875,7 +2881,10 @@ define i8 @atomicrmw_max_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    ldr w8, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-NOLSE-O0-NEXT:    sxtb w9, w10
 ; CHECK-NOLSE-O0-NEXT:    subs w9, w9, w8, sxtb
-; CHECK-NOLSE-O0-NEXT:    csel w12, w10, w8, gt
+; CHECK-NOLSE-O0-NEXT:    cset w9, gt
+; CHECK-NOLSE-O0-NEXT:    and w9, w9, #0x1
+; CHECK-NOLSE-O0-NEXT:    ands w9, w9, #0x1
+; CHECK-NOLSE-O0-NEXT:    csel w12, w10, w8, ne
 ; CHECK-NOLSE-O0-NEXT:  LBB34_2: ; %atomicrmw.start
 ; CHECK-NOLSE-O0-NEXT:    ; Parent Loop BB34_1 Depth=1
 ; CHECK-NOLSE-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -2941,7 +2950,10 @@ define i8 @atomicrmw_max_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LDAPR-O0-NEXT:    ldr w8, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-LDAPR-O0-NEXT:    sxtb w9, w10
 ; CHECK-LDAPR-O0-NEXT:    subs w9, w9, w8, sxtb
-; CHECK-LDAPR-O0-NEXT:    csel w12, w10, w8, gt
+; CHECK-LDAPR-O0-NEXT:    cset w9, gt
+; CHECK-LDAPR-O0-NEXT:    and w9, w9, #0x1
+; CHECK-LDAPR-O0-NEXT:    ands w9, w9, #0x1
+; CHECK-LDAPR-O0-NEXT:    csel w12, w10, w8, ne
 ; CHECK-LDAPR-O0-NEXT:  LBB34_2: ; %atomicrmw.start
 ; CHECK-LDAPR-O0-NEXT:    ; Parent Loop BB34_1 Depth=1
 ; CHECK-LDAPR-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -3002,7 +3014,10 @@ define i8 @atomicrmw_umin_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    ldr w8, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-NOLSE-O0-NEXT:    and w9, w10, #0xff
 ; CHECK-NOLSE-O0-NEXT:    subs w9, w9, w8, uxtb
-; CHECK-NOLSE-O0-NEXT:    csel w12, w10, w8, ls
+; CHECK-NOLSE-O0-NEXT:    cset w9, ls
+; CHECK-NOLSE-O0-NEXT:    and w9, w9, #0x1
+; CHECK-NOLSE-O0-NEXT:    ands w9, w9, #0x1
+; CHECK-NOLSE-O0-NEXT:    csel w12, w10, w8, ne
 ; CHECK-NOLSE-O0-NEXT:  LBB35_2: ; %atomicrmw.start
 ; CHECK-NOLSE-O0-NEXT:    ; Parent Loop BB35_1 Depth=1
 ; CHECK-NOLSE-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -3069,7 +3084,10 @@ define i8 @atomicrmw_umin_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LDAPR-O0-NEXT:    ldr w8, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-LDAPR-O0-NEXT:    and w9, w10, #0xff
 ; CHECK-LDAPR-O0-NEXT:    subs w9, w9, w8, uxtb
-; CHECK-LDAPR-O0-NEXT:    csel w12, w10, w8, ls
+; CHECK-LDAPR-O0-NEXT:    cset w9, ls
+; CHECK-LDAPR-O0-NEXT:    and w9, w9, #0x1
+; CHECK-LDAPR-O0-NEXT:    ands w9, w9, #0x1
+; CHECK-LDAPR-O0-NEXT:    csel w12, w10, w8, ne
 ; CHECK-LDAPR-O0-NEXT:  LBB35_2: ; %atomicrmw.start
 ; CHECK-LDAPR-O0-NEXT:    ; Parent Loop BB35_1 Depth=1
 ; CHECK-LDAPR-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -3130,7 +3148,10 @@ define i8 @atomicrmw_umax_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    ldr w8, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-NOLSE-O0-NEXT:    and w9, w10, #0xff
 ; CHECK-NOLSE-O0-NEXT:    subs w9, w9, w8, uxtb
-; CHECK-NOLSE-O0-NEXT:    csel w12, w10, w8, hi
+; CHECK-NOLSE-O0-NEXT:    cset w9, hi
+; CHECK-NOLSE-O0-NEXT:    and w9, w9, #0x1
+; CHECK-NOLSE-O0-NEXT:    ands w9, w9, #0x1
+; CHECK-NOLSE-O0-NEXT:    csel w12, w10, w8, ne
 ; CHECK-NOLSE-O0-NEXT:  LBB36_2: ; %atomicrmw.start
 ; CHECK-NOLSE-O0-NEXT:    ; Parent Loop BB36_1 Depth=1
 ; CHECK-NOLSE-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -3197,7 +3218,10 @@ define i8 @atomicrmw_umax_i8(i8* %ptr, i8 %rhs) {
 ; CHECK-LDAPR-O0-NEXT:    ldr w8, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-LDAPR-O0-NEXT:    and w9, w10, #0xff
 ; CHECK-LDAPR-O0-NEXT:    subs w9, w9, w8, uxtb
-; CHECK-LDAPR-O0-NEXT:    csel w12, w10, w8, hi
+; CHECK-LDAPR-O0-NEXT:    cset w9, hi
+; CHECK-LDAPR-O0-NEXT:    and w9, w9, #0x1
+; CHECK-LDAPR-O0-NEXT:    ands w9, w9, #0x1
+; CHECK-LDAPR-O0-NEXT:    csel w12, w10, w8, ne
 ; CHECK-LDAPR-O0-NEXT:  LBB36_2: ; %atomicrmw.start
 ; CHECK-LDAPR-O0-NEXT:    ; Parent Loop BB36_1 Depth=1
 ; CHECK-LDAPR-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -3967,7 +3991,10 @@ define i16 @atomicrmw_min_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    ldr w9, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-NOLSE-O0-NEXT:    sxth w10, w8
 ; CHECK-NOLSE-O0-NEXT:    subs w10, w10, w9, sxth
-; CHECK-NOLSE-O0-NEXT:    csel w12, w8, w9, le
+; CHECK-NOLSE-O0-NEXT:    cset w10, le
+; CHECK-NOLSE-O0-NEXT:    and w10, w10, #0x1
+; CHECK-NOLSE-O0-NEXT:    ands w10, w10, #0x1
+; CHECK-NOLSE-O0-NEXT:    csel w12, w8, w9, ne
 ; CHECK-NOLSE-O0-NEXT:  LBB43_2: ; %atomicrmw.start
 ; CHECK-NOLSE-O0-NEXT:    ; Parent Loop BB43_1 Depth=1
 ; CHECK-NOLSE-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -4033,7 +4060,10 @@ define i16 @atomicrmw_min_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LDAPR-O0-NEXT:    ldr w9, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-LDAPR-O0-NEXT:    sxth w10, w8
 ; CHECK-LDAPR-O0-NEXT:    subs w10, w10, w9, sxth
-; CHECK-LDAPR-O0-NEXT:    csel w12, w8, w9, le
+; CHECK-LDAPR-O0-NEXT:    cset w10, le
+; CHECK-LDAPR-O0-NEXT:    and w10, w10, #0x1
+; CHECK-LDAPR-O0-NEXT:    ands w10, w10, #0x1
+; CHECK-LDAPR-O0-NEXT:    csel w12, w8, w9, ne
 ; CHECK-LDAPR-O0-NEXT:  LBB43_2: ; %atomicrmw.start
 ; CHECK-LDAPR-O0-NEXT:    ; Parent Loop BB43_1 Depth=1
 ; CHECK-LDAPR-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -4093,7 +4123,10 @@ define i16 @atomicrmw_max_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    ldr w9, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-NOLSE-O0-NEXT:    sxth w10, w8
 ; CHECK-NOLSE-O0-NEXT:    subs w10, w10, w9, sxth
-; CHECK-NOLSE-O0-NEXT:    csel w12, w8, w9, gt
+; CHECK-NOLSE-O0-NEXT:    cset w10, gt
+; CHECK-NOLSE-O0-NEXT:    and w10, w10, #0x1
+; CHECK-NOLSE-O0-NEXT:    ands w10, w10, #0x1
+; CHECK-NOLSE-O0-NEXT:    csel w12, w8, w9, ne
 ; CHECK-NOLSE-O0-NEXT:  LBB44_2: ; %atomicrmw.start
 ; CHECK-NOLSE-O0-NEXT:    ; Parent Loop BB44_1 Depth=1
 ; CHECK-NOLSE-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -4159,7 +4192,10 @@ define i16 @atomicrmw_max_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LDAPR-O0-NEXT:    ldr w9, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-LDAPR-O0-NEXT:    sxth w10, w8
 ; CHECK-LDAPR-O0-NEXT:    subs w10, w10, w9, sxth
-; CHECK-LDAPR-O0-NEXT:    csel w12, w8, w9, gt
+; CHECK-LDAPR-O0-NEXT:    cset w10, gt
+; CHECK-LDAPR-O0-NEXT:    and w10, w10, #0x1
+; CHECK-LDAPR-O0-NEXT:    ands w10, w10, #0x1
+; CHECK-LDAPR-O0-NEXT:    csel w12, w8, w9, ne
 ; CHECK-LDAPR-O0-NEXT:  LBB44_2: ; %atomicrmw.start
 ; CHECK-LDAPR-O0-NEXT:    ; Parent Loop BB44_1 Depth=1
 ; CHECK-LDAPR-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -4220,7 +4256,10 @@ define i16 @atomicrmw_umin_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    ldr w9, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-NOLSE-O0-NEXT:    uxth w10, w8
 ; CHECK-NOLSE-O0-NEXT:    subs w10, w10, w9, uxth
-; CHECK-NOLSE-O0-NEXT:    csel w12, w8, w9, ls
+; CHECK-NOLSE-O0-NEXT:    cset w10, ls
+; CHECK-NOLSE-O0-NEXT:    and w10, w10, #0x1
+; CHECK-NOLSE-O0-NEXT:    ands w10, w10, #0x1
+; CHECK-NOLSE-O0-NEXT:    csel w12, w8, w9, ne
 ; CHECK-NOLSE-O0-NEXT:  LBB45_2: ; %atomicrmw.start
 ; CHECK-NOLSE-O0-NEXT:    ; Parent Loop BB45_1 Depth=1
 ; CHECK-NOLSE-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -4287,7 +4326,10 @@ define i16 @atomicrmw_umin_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LDAPR-O0-NEXT:    ldr w9, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-LDAPR-O0-NEXT:    uxth w10, w8
 ; CHECK-LDAPR-O0-NEXT:    subs w10, w10, w9, uxth
-; CHECK-LDAPR-O0-NEXT:    csel w12, w8, w9, ls
+; CHECK-LDAPR-O0-NEXT:    cset w10, ls
+; CHECK-LDAPR-O0-NEXT:    and w10, w10, #0x1
+; CHECK-LDAPR-O0-NEXT:    ands w10, w10, #0x1
+; CHECK-LDAPR-O0-NEXT:    csel w12, w8, w9, ne
 ; CHECK-LDAPR-O0-NEXT:  LBB45_2: ; %atomicrmw.start
 ; CHECK-LDAPR-O0-NEXT:    ; Parent Loop BB45_1 Depth=1
 ; CHECK-LDAPR-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -4348,7 +4390,10 @@ define i16 @atomicrmw_umax_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-NOLSE-O0-NEXT:    ldr w9, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-NOLSE-O0-NEXT:    uxth w10, w8
 ; CHECK-NOLSE-O0-NEXT:    subs w10, w10, w9, uxth
-; CHECK-NOLSE-O0-NEXT:    csel w12, w8, w9, hi
+; CHECK-NOLSE-O0-NEXT:    cset w10, hi
+; CHECK-NOLSE-O0-NEXT:    and w10, w10, #0x1
+; CHECK-NOLSE-O0-NEXT:    ands w10, w10, #0x1
+; CHECK-NOLSE-O0-NEXT:    csel w12, w8, w9, ne
 ; CHECK-NOLSE-O0-NEXT:  LBB46_2: ; %atomicrmw.start
 ; CHECK-NOLSE-O0-NEXT:    ; Parent Loop BB46_1 Depth=1
 ; CHECK-NOLSE-O0-NEXT:    ; => This Inner Loop Header: Depth=2
@@ -4415,7 +4460,10 @@ define i16 @atomicrmw_umax_i16(i16* %ptr, i16 %rhs) {
 ; CHECK-LDAPR-O0-NEXT:    ldr w9, [sp, #24] ; 4-byte Folded Reload
 ; CHECK-LDAPR-O0-NEXT:    uxth w10, w8
 ; CHECK-LDAPR-O0-NEXT:    subs w10, w10, w9, uxth
-; CHECK-LDAPR-O0-NEXT:    csel w12, w8, w9, hi
+; CHECK-LDAPR-O0-NEXT:    cset w10, hi
+; CHECK-LDAPR-O0-NEXT:    and w10, w10, #0x1
+; CHECK-LDAPR-O0-NEXT:    ands w10, w10, #0x1
+; CHECK-LDAPR-O0-NEXT:    csel w12, w8, w9, ne
 ; CHECK-LDAPR-O0-NEXT:  LBB46_2: ; %atomicrmw.start
 ; CHECK-LDAPR-O0-NEXT:    ; Parent Loop BB46_1 Depth=1
 ; CHECK-LDAPR-O0-NEXT:    ; => This Inner Loop Header: Depth=2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/fold-brcond-fcmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fold-brcond-fcmp.mir
index 13fd67a1c39a..cf80828151f4 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fold-brcond-fcmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fold-brcond-fcmp.mir
@@ -38,8 +38,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(oeq), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -80,8 +79,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(ogt), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -122,8 +120,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(oge), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -164,8 +161,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(olt), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -206,8 +202,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(ole), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -249,8 +244,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(one), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -291,8 +285,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(ord), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -333,8 +326,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(uno), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -376,8 +368,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(ueq), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -418,8 +409,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(ugt), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -460,8 +450,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(uge), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -502,8 +491,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(ult), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -544,8 +532,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(ule), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs
@@ -586,8 +573,7 @@ body:             |
     %cmp_lhs:fpr(s32) = COPY $s0
     %cmp_rhs:fpr(s32) = COPY $s1
     %fcmp:gpr(s32) = G_FCMP floatpred(une), %cmp_lhs(s32), %cmp_rhs
-    %trunc:gpr(s1) = G_TRUNC %fcmp(s32)
-    G_BRCOND %trunc(s1), %bb.2
+    G_BRCOND %fcmp, %bb.2
     G_BR %bb.1
   bb.1:
     $s0 = COPY %cmp_lhs

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/fold-fp-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fold-fp-select.mir
index 37b9681ba6e8..0d83719d4082 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fold-fp-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fold-fp-select.mir
@@ -30,9 +30,7 @@ body:             |
     ; CHECK-NEXT: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
     ; CHECK-NEXT: nofpexcept FCMPSri [[COPY]], implicit-def $nzcv
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
-    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY [[CSINCWr]]
-    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
-    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY3]], 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[CSINCWr]], 0, implicit-def $nzcv
     ; CHECK-NEXT: [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[FMOVS0_]], [[COPY1]], 1, implicit $nzcv
     ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
     ; CHECK-NEXT: $s0 = COPY [[FCSELSrrr]]
@@ -41,9 +39,7 @@ body:             |
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = G_FCONSTANT float 0.000000e+00
     %5:gpr(s32) = G_FCMP floatpred(oeq), %0(s32), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %6:fpr(s1) = COPY %3(s1)
-    %4:fpr(s32) = G_SELECT %6(s1), %2, %1
+    %4:fpr(s32) = G_SELECT %5, %2, %1
     $w1 = COPY %5(s32)
     $s0 = COPY %4(s32)
     RET_ReallyLR implicit $s0
@@ -76,10 +72,8 @@ body:             |
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = G_FCONSTANT float 0.000000e+00
     %5:gpr(s32) = G_FCMP floatpred(oeq), %0(s32), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %6:fpr(s1) = COPY %3(s1)
-    %4:fpr(s32) = G_SELECT %6(s1), %2, %1
-    %7:fpr(s32) = G_SELECT %6(s1), %1, %2
+    %4:fpr(s32) = G_SELECT %5, %2, %1
+    %7:fpr(s32) = G_SELECT %5, %1, %2
     $s0 = COPY %4(s32)
     $s1 = COPY %7(s32)
     RET_ReallyLR implicit $s0
@@ -110,9 +104,7 @@ body:             |
     %2:gpr(s32) = G_CONSTANT i32 0
     %5:fpr(s32) = G_FCONSTANT float 0.000000e+00
     %6:gpr(s32) = G_ICMP intpred(eq), %0(s32), %2
-    %3:gpr(s1) = G_TRUNC %6(s32)
-    %7:fpr(s1) = COPY %3(s1)
-    %4:fpr(s32) = G_SELECT %7(s1), %1, %5
+    %4:fpr(s32) = G_SELECT %6, %1, %5
     $s0 = COPY %4(s32)
     RET_ReallyLR implicit $s0
 
@@ -141,9 +133,7 @@ body:             |
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = G_FCONSTANT float 0.000000e+00
     %5:gpr(s32) = G_FCMP floatpred(oeq), %0(s32), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %6:fpr(s1) = COPY %3(s1)
-    %4:fpr(s32) = G_SELECT %6(s1), %2, %1
+    %4:fpr(s32) = G_SELECT %5, %2, %1
     $s0 = COPY %4(s32)
     RET_ReallyLR implicit $s0
 
@@ -168,9 +158,7 @@ body:             |
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
     ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
     ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
-    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY [[ORRWrr]]
-    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
-    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY3]], 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[ORRWrr]], 0, implicit-def $nzcv
     ; CHECK-NEXT: [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[FMOVS0_]], [[COPY1]], 1, implicit $nzcv
     ; CHECK-NEXT: $s0 = COPY [[FCSELSrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
@@ -178,9 +166,7 @@ body:             |
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = G_FCONSTANT float 0.000000e+00
     %5:gpr(s32) = G_FCMP floatpred(ueq), %0(s32), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %6:fpr(s1) = COPY %3(s1)
-    %4:fpr(s32) = G_SELECT %6(s1), %2, %1
+    %4:fpr(s32) = G_SELECT %5, %2, %1
     $s0 = COPY %4(s32)
     RET_ReallyLR implicit $s0
 
@@ -205,9 +191,7 @@ body:             |
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
     ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
     ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
-    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY [[ORRWrr]]
-    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
-    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY3]], 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[ORRWrr]], 0, implicit-def $nzcv
     ; CHECK-NEXT: [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[COPY1]], [[FMOVS0_]], 1, implicit $nzcv
     ; CHECK-NEXT: $s0 = COPY [[FCSELSrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
@@ -215,9 +199,7 @@ body:             |
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = G_FCONSTANT float 0.000000e+00
     %5:gpr(s32) = G_FCMP floatpred(one), %0(s32), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %6:fpr(s1) = COPY %3(s1)
-    %4:fpr(s32) = G_SELECT %6(s1), %1, %2
+    %4:fpr(s32) = G_SELECT %5, %1, %2
     $s0 = COPY %4(s32)
     RET_ReallyLR implicit $s0
 
@@ -246,9 +228,7 @@ body:             |
     %1:fpr(s32) = COPY $s1
     %2:fpr(s32) = G_FCONSTANT float 0.000000e+00
     %5:gpr(s32) = G_FCMP floatpred(une), %0(s32), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %6:fpr(s1) = COPY %3(s1)
-    %4:fpr(s32) = G_SELECT %6(s1), %1, %2
+    %4:fpr(s32) = G_SELECT %5, %1, %2
     $s0 = COPY %4(s32)
     RET_ReallyLR implicit $s0
 
@@ -277,9 +257,7 @@ body:             |
     %1:fpr(s64) = COPY $d1
     %2:fpr(s64) = G_FCONSTANT double 0.000000e+00
     %5:gpr(s32) = G_FCMP floatpred(oeq), %0(s64), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %6:fpr(s1) = COPY %3(s1)
-    %4:fpr(s64) = G_SELECT %6(s1), %2, %1
+    %4:fpr(s64) = G_SELECT %5, %2, %1
     $d0 = COPY %4(s64)
     RET_ReallyLR implicit $d0
 
@@ -304,9 +282,7 @@ body:             |
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
     ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
     ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
-    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY [[ORRWrr]]
-    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
-    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY3]], 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[ORRWrr]], 0, implicit-def $nzcv
     ; CHECK-NEXT: [[FCSELDrrr:%[0-9]+]]:fpr64 = FCSELDrrr [[FMOVD0_]], [[COPY1]], 1, implicit $nzcv
     ; CHECK-NEXT: $d0 = COPY [[FCSELDrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
@@ -314,9 +290,7 @@ body:             |
     %1:fpr(s64) = COPY $d1
     %2:fpr(s64) = G_FCONSTANT double 0.000000e+00
     %5:gpr(s32) = G_FCMP floatpred(ueq), %0(s64), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %6:fpr(s1) = COPY %3(s1)
-    %4:fpr(s64) = G_SELECT %6(s1), %2, %1
+    %4:fpr(s64) = G_SELECT %5, %2, %1
     $d0 = COPY %4(s64)
     RET_ReallyLR implicit $d0
 
@@ -341,9 +315,7 @@ body:             |
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 5, implicit $nzcv
     ; CHECK-NEXT: [[CSINCWr1:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
     ; CHECK-NEXT: [[ORRWrr:%[0-9]+]]:gpr32 = ORRWrr [[CSINCWr]], [[CSINCWr1]]
-    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY [[ORRWrr]]
-    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr32 = COPY [[COPY2]]
-    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY3]], 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[ORRWrr]], 0, implicit-def $nzcv
     ; CHECK-NEXT: [[FCSELDrrr:%[0-9]+]]:fpr64 = FCSELDrrr [[COPY1]], [[FMOVD0_]], 1, implicit $nzcv
     ; CHECK-NEXT: $d0 = COPY [[FCSELDrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
@@ -351,9 +323,7 @@ body:             |
     %1:fpr(s64) = COPY $d1
     %2:fpr(s64) = G_FCONSTANT double 0.000000e+00
     %5:gpr(s32) = G_FCMP floatpred(one), %0(s64), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %6:fpr(s1) = COPY %3(s1)
-    %4:fpr(s64) = G_SELECT %6(s1), %1, %2
+    %4:fpr(s64) = G_SELECT %5, %1, %2
     $d0 = COPY %4(s64)
     RET_ReallyLR implicit $d0
 
@@ -382,9 +352,7 @@ body:             |
     %1:fpr(s64) = COPY $d1
     %2:fpr(s64) = G_FCONSTANT double 0.000000e+00
     %5:gpr(s32) = G_FCMP floatpred(une), %0(s64), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %6:fpr(s1) = COPY %3(s1)
-    %4:fpr(s64) = G_SELECT %6(s1), %1, %2
+    %4:fpr(s64) = G_SELECT %5, %1, %2
     $d0 = COPY %4(s64)
     RET_ReallyLR implicit $d0
 
@@ -402,12 +370,12 @@ body:             |
     ; CHECK-LABEL: name: copy_from_physreg
     ; CHECK: liveins: $s0, $w0, $w1
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s0
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[FMOVS0_:%[0-9]+]]:fpr32 = FMOVS0
     ; CHECK-NEXT: BL @copy_from_physreg, implicit-def $w0
-    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
-    ; CHECK-NEXT: [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[COPY1]], [[FMOVS0_]], 1, implicit $nzcv
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[FMOVS0_]]
+    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY1]], 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[COPY]], [[FMOVS0_]], 1, implicit $nzcv
     ; CHECK-NEXT: BL @copy_from_physreg, implicit-def $w0
     ; CHECK-NEXT: $s0 = COPY [[FCSELSrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
@@ -415,8 +383,7 @@ body:             |
     %1:fpr(s32) = COPY $s0
     %5:fpr(s32) = G_FCONSTANT float 0.000000e+00
     BL @copy_from_physreg, implicit-def $w0
-    %3:gpr(s1) = G_TRUNC %0(s32)
-    %4:fpr(s32) = G_SELECT %3(s1), %1, %5
+    %4:fpr(s32) = G_SELECT %5, %1, %5
     BL @copy_from_physreg, implicit-def $w0
     $s0 = COPY %4(s32)
     RET_ReallyLR implicit $s0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
index 7f40569c2826..b396d084446b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/fold-select.mir
@@ -31,8 +31,7 @@ body:             |
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 0
     %5:gpr(s32) = G_ICMP intpred(eq), %0(s32), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %4:gpr(s32) = G_SELECT %3(s1), %2, %1
+    %4:gpr(s32) = G_SELECT %5, %2, %1
     $w0 = COPY %4(s32)
     RET_ReallyLR implicit $w0
 
@@ -62,8 +61,7 @@ body:             |
     %3:fpr(s32) = G_FCONSTANT float 0.000000e+00
     %6:gpr(s32) = G_CONSTANT i32 0
     %7:gpr(s32) = G_FCMP floatpred(oeq), %2(s32), %3
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s32) = G_SELECT %4(s1), %6, %1
+    %5:gpr(s32) = G_SELECT %7, %6, %1
     $w0 = COPY %5(s32)
     RET_ReallyLR implicit $w0
 
@@ -90,8 +88,7 @@ body:             |
     %1:gpr(s32) = COPY $w1
     %2:gpr(s32) = G_CONSTANT i32 1
     %5:gpr(s32) = G_ICMP intpred(eq), %0(s32), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    %4:gpr(s32) = G_SELECT %3(s1), %0, %2
+    %4:gpr(s32) = G_SELECT %5, %0, %2
     $w0 = COPY %4(s32)
     RET_ReallyLR implicit $w0
 ...

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/huge-switch.ll b/llvm/test/CodeGen/AArch64/GlobalISel/huge-switch.ll
index 8742a848c4af..3c193307a863 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/huge-switch.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/huge-switch.ll
@@ -1,7 +1,9 @@
 ; RUN: llc -mtriple=arm64-apple-ios %s -o - -O0 -global-isel=1 | FileCheck %s
 define void @foo(i512 %in) {
 ; CHECK-LABEL: foo:
-; CHECK: cbz
+; CHECK: subs
+; CHECK-NEXT: cset
+; CHECK-NEXT: tbnz
   switch i512 %in, label %default [
     i512 3923188584616675477397368389504791510063972152790021570560, label %l1
     i512 3923188584616675477397368389504791510063972152790021570561, label %l2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
index 1af782bf8a1c..ef6aecc692b8 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
@@ -9,8 +9,10 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY2]]
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY3]], [[UADDO1]]
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[COPY]], [[COPY2]]
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY1]], [[COPY3]], %18
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C]]
     ; CHECK-NEXT: $x0 = COPY [[UADDO]](s64)
     ; CHECK-NEXT: $x1 = COPY [[UADDE]](s64)
     %0:_(s64) = COPY $x0
@@ -34,9 +36,12 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY2]], [[UADDO1]]
-    ; CHECK-NEXT: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[COPY2]], [[COPY3]], [[UADDE1]]
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY1]], [[COPY2]], %34
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C]]
+    ; CHECK-NEXT: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s32) = G_UADDE [[COPY2]], [[COPY3]], %32
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UADDE1]], [[C]]
     ; CHECK-NEXT: $x0 = COPY [[UADDO]](s64)
     ; CHECK-NEXT: $x1 = COPY [[UADDE]](s64)
     ; CHECK-NEXT: $x2 = COPY [[UADDE2]](s64)
@@ -83,8 +88,10 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY2]]
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY3]], [[UADDO1]]
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[COPY]], [[COPY2]]
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY1]], [[COPY3]], %24
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C]]
     ; CHECK-NEXT: $x0 = COPY [[UADDO]](s64)
     ; CHECK-NEXT: $x1 = COPY [[UADDE]](s64)
     %0:_(s64) = COPY $x0
@@ -111,8 +118,10 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY2]]
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY3]], [[UADDO1]]
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[COPY]], [[COPY2]]
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY1]], [[COPY3]], %24
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C]]
     ; CHECK-NEXT: $x0 = COPY [[UADDO]](s64)
     ; CHECK-NEXT: $x1 = COPY [[UADDE]](s64)
     %0:_(s64) = COPY $x0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
index 73e0e0a420da..531794b14042 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-build-vector.mir
@@ -94,22 +94,8 @@ body: |
   bb.0:
     ; CHECK-LABEL: name: widen_v16s1
     ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[COPY]](s8), [[COPY1]](s8), [[COPY2]](s8), [[COPY3]](s8), [[COPY4]](s8), [[COPY5]](s8), [[COPY6]](s8), [[COPY7]](s8), [[COPY8]](s8), [[COPY9]](s8), [[COPY10]](s8), [[COPY11]](s8), [[COPY12]](s8), [[COPY13]](s8), [[COPY14]](s8), [[DEF]](s8)
+    ; CHECK-NEXT: [[DEF1:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF1]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF]](s8), [[DEF1]](s8), [[DEF]](s8), [[DEF1]](s8)
     ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<16 x s8>)
     ; CHECK-NEXT: RET_ReallyLR
     %0:_(s1) = G_IMPLICIT_DEF

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
index d23814b3c022..c7c061a15a2f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-cmp.mir
@@ -49,11 +49,12 @@ body:             |
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967296
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[DEF]](s64), [[C1]]
   ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[DEF]](s64), [[C1]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
   ; CHECK-NEXT:   [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[DEF]](s64), [[C]]
-  ; CHECK-NEXT:   [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[ICMP2]], [[ICMP]]
-  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[SELECT]](s32)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC1]](s1), %bb.1
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP1]], [[C2]]
+  ; CHECK-NEXT:   [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[ICMP2]], [[ICMP]]
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C2]]
+  ; CHECK-NEXT:   G_BRCOND [[AND1]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -90,8 +91,9 @@ body:             |
   ; CHECK-NEXT:   [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[DEF]], [[DEF]]
   ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s64) = G_OR [[XOR]], [[XOR1]]
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[OR]](s64), [[C]]
-  ; CHECK-NEXT:   %cmp:_(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   G_BRCOND %cmp(s1), %bb.1
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C1]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -130,8 +132,9 @@ body:             |
   ; CHECK-NEXT:   [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[AND1]], [[AND3]]
   ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s64) = G_OR [[XOR]], [[XOR1]]
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[OR]](s64), [[C2]]
-  ; CHECK-NEXT:   %cmp:_(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   G_BRCOND %cmp(s1), %bb.1
+  ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND4:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C3]]
+  ; CHECK-NEXT:   G_BRCOND [[AND4]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -170,8 +173,9 @@ body:             |
   ; CHECK-NEXT:   [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[AND1]], [[AND3]]
   ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s64) = G_OR [[XOR]], [[XOR1]]
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[OR]](s64), [[C2]]
-  ; CHECK-NEXT:   %cmp:_(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   G_BRCOND %cmp(s1), %bb.1
+  ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND4:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C3]]
+  ; CHECK-NEXT:   G_BRCOND [[AND4]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -210,8 +214,9 @@ body:             |
   ; CHECK-NEXT:   [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[AND1]], [[AND3]]
   ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s64) = G_OR [[XOR]], [[XOR1]]
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[OR]](s64), [[C2]]
-  ; CHECK-NEXT:   %cmp:_(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   G_BRCOND %cmp(s1), %bb.1
+  ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND4:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C3]]
+  ; CHECK-NEXT:   G_BRCOND [[AND4]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -273,8 +278,9 @@ body:             |
   ; CHECK-NEXT:   [[OR5:%[0-9]+]]:_(s64) = G_OR [[OR4]], [[XOR6]]
   ; CHECK-NEXT:   [[OR6:%[0-9]+]]:_(s64) = G_OR [[OR5]], [[XOR7]]
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[OR6]](s64), [[C2]]
-  ; CHECK-NEXT:   %cmp:_(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   G_BRCOND %cmp(s1), %bb.1
+  ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND16:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C3]]
+  ; CHECK-NEXT:   G_BRCOND [[AND16]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -320,8 +326,9 @@ body:             |
   ; CHECK-NEXT:   [[OR1:%[0-9]+]]:_(s64) = G_OR [[OR]], [[XOR2]]
   ; CHECK-NEXT:   [[OR2:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[XOR3]]
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[OR2]](s64), [[C2]]
-  ; CHECK-NEXT:   %cmp:_(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   G_BRCOND %cmp(s1), %bb.1
+  ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND8:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C3]]
+  ; CHECK-NEXT:   G_BRCOND [[AND8]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir
index 32ae2203be44..57bd3e761f81 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-freeze.mir
@@ -91,9 +91,11 @@ body: |
   bb.0.entry:
     liveins: $x0
     ; CHECK-LABEL: name: test_freeze_s1
-    ; CHECK: %x:_(s1) = G_IMPLICIT_DEF
-    ; CHECK-NEXT: %freeze:_(s1) = G_FREEZE %x
-    ; CHECK-NEXT: %ext:_(s64) = G_ZEXT %freeze(s1)
+    ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s8) = G_FREEZE [[DEF]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+    ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[FREEZE]](s8)
+    ; CHECK-NEXT: %ext:_(s64) = G_AND [[ANYEXT]], [[C]]
     ; CHECK-NEXT: $x0 = COPY %ext(s64)
     %x:_(s1) = G_IMPLICIT_DEF
     %freeze:_(s1) = G_FREEZE %x

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir
index 066668360809..4ee5be07e454 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir
@@ -374,189 +374,252 @@ body: |
     ; CHECK-NEXT: [[OR55:%[0-9]+]]:_(s32) = G_OR [[OR54]], [[SHL55]]
     ; CHECK-NEXT: [[TRUNC7:%[0-9]+]]:_(s8) = G_TRUNC [[OR55]](s32)
     ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[TRUNC]](s8), [[TRUNC1]](s8), [[TRUNC2]](s8), [[TRUNC3]](s8), [[TRUNC4]](s8), [[TRUNC5]](s8), [[TRUNC6]](s8), [[TRUNC7]](s8)
-    ; CHECK-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL56:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s64)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND64:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C7]]
+    ; CHECK-NEXT: [[SHL56:%[0-9]+]]:_(s32) = G_SHL [[AND64]], [[C]](s64)
     ; CHECK-NEXT: [[TRUNC8:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
-    ; CHECK-NEXT: [[AND64:%[0-9]+]]:_(s32) = G_AND [[TRUNC8]], [[C7]]
-    ; CHECK-NEXT: [[OR56:%[0-9]+]]:_(s32) = G_OR [[AND64]], [[SHL56]]
-    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL57:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]](s64)
+    ; CHECK-NEXT: [[AND65:%[0-9]+]]:_(s32) = G_AND [[TRUNC8]], [[C7]]
+    ; CHECK-NEXT: [[OR56:%[0-9]+]]:_(s32) = G_OR [[AND65]], [[SHL56]]
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND66:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C7]]
+    ; CHECK-NEXT: [[SHL57:%[0-9]+]]:_(s32) = G_SHL [[AND66]], [[C1]](s64)
     ; CHECK-NEXT: [[OR57:%[0-9]+]]:_(s32) = G_OR [[OR56]], [[SHL57]]
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL58:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]](s64)
+    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND67:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C7]]
+    ; CHECK-NEXT: [[SHL58:%[0-9]+]]:_(s32) = G_SHL [[AND67]], [[C2]](s64)
     ; CHECK-NEXT: [[OR58:%[0-9]+]]:_(s32) = G_OR [[OR57]], [[SHL58]]
-    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL59:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C3]](s64)
+    ; CHECK-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND68:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C7]]
+    ; CHECK-NEXT: [[SHL59:%[0-9]+]]:_(s32) = G_SHL [[AND68]], [[C3]](s64)
     ; CHECK-NEXT: [[OR59:%[0-9]+]]:_(s32) = G_OR [[OR58]], [[SHL59]]
-    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL60:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[C4]](s64)
+    ; CHECK-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND69:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C7]]
+    ; CHECK-NEXT: [[SHL60:%[0-9]+]]:_(s32) = G_SHL [[AND69]], [[C4]](s64)
     ; CHECK-NEXT: [[OR60:%[0-9]+]]:_(s32) = G_OR [[OR59]], [[SHL60]]
-    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL61:%[0-9]+]]:_(s32) = G_SHL [[COPY7]], [[C5]](s64)
+    ; CHECK-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND70:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C7]]
+    ; CHECK-NEXT: [[SHL61:%[0-9]+]]:_(s32) = G_SHL [[AND70]], [[C5]](s64)
     ; CHECK-NEXT: [[OR61:%[0-9]+]]:_(s32) = G_OR [[OR60]], [[SHL61]]
-    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL62:%[0-9]+]]:_(s32) = G_SHL [[COPY8]], [[C6]](s64)
+    ; CHECK-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND71:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C7]]
+    ; CHECK-NEXT: [[SHL62:%[0-9]+]]:_(s32) = G_SHL [[AND71]], [[C6]](s64)
     ; CHECK-NEXT: [[OR62:%[0-9]+]]:_(s32) = G_OR [[OR61]], [[SHL62]]
     ; CHECK-NEXT: [[TRUNC9:%[0-9]+]]:_(s8) = G_TRUNC [[OR62]](s32)
-    ; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL63:%[0-9]+]]:_(s32) = G_SHL [[COPY9]], [[C]](s64)
-    ; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[OR63:%[0-9]+]]:_(s32) = G_OR [[COPY10]], [[SHL63]]
-    ; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL64:%[0-9]+]]:_(s32) = G_SHL [[COPY11]], [[C1]](s64)
+    ; CHECK-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND72:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C7]]
+    ; CHECK-NEXT: [[SHL63:%[0-9]+]]:_(s32) = G_SHL [[AND72]], [[C]](s64)
+    ; CHECK-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND73:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C7]]
+    ; CHECK-NEXT: [[OR63:%[0-9]+]]:_(s32) = G_OR [[AND73]], [[SHL63]]
+    ; CHECK-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND74:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C7]]
+    ; CHECK-NEXT: [[SHL64:%[0-9]+]]:_(s32) = G_SHL [[AND74]], [[C1]](s64)
     ; CHECK-NEXT: [[OR64:%[0-9]+]]:_(s32) = G_OR [[OR63]], [[SHL64]]
-    ; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL65:%[0-9]+]]:_(s32) = G_SHL [[COPY12]], [[C2]](s64)
+    ; CHECK-NEXT: [[COPY12:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND75:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C7]]
+    ; CHECK-NEXT: [[SHL65:%[0-9]+]]:_(s32) = G_SHL [[AND75]], [[C2]](s64)
     ; CHECK-NEXT: [[OR65:%[0-9]+]]:_(s32) = G_OR [[OR64]], [[SHL65]]
-    ; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL66:%[0-9]+]]:_(s32) = G_SHL [[COPY13]], [[C3]](s64)
+    ; CHECK-NEXT: [[COPY13:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND76:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C7]]
+    ; CHECK-NEXT: [[SHL66:%[0-9]+]]:_(s32) = G_SHL [[AND76]], [[C3]](s64)
     ; CHECK-NEXT: [[OR66:%[0-9]+]]:_(s32) = G_OR [[OR65]], [[SHL66]]
-    ; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL67:%[0-9]+]]:_(s32) = G_SHL [[COPY14]], [[C4]](s64)
+    ; CHECK-NEXT: [[COPY14:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND77:%[0-9]+]]:_(s32) = G_AND [[COPY14]], [[C7]]
+    ; CHECK-NEXT: [[SHL67:%[0-9]+]]:_(s32) = G_SHL [[AND77]], [[C4]](s64)
     ; CHECK-NEXT: [[OR67:%[0-9]+]]:_(s32) = G_OR [[OR66]], [[SHL67]]
-    ; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL68:%[0-9]+]]:_(s32) = G_SHL [[COPY15]], [[C5]](s64)
+    ; CHECK-NEXT: [[COPY15:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND78:%[0-9]+]]:_(s32) = G_AND [[COPY15]], [[C7]]
+    ; CHECK-NEXT: [[SHL68:%[0-9]+]]:_(s32) = G_SHL [[AND78]], [[C5]](s64)
     ; CHECK-NEXT: [[OR68:%[0-9]+]]:_(s32) = G_OR [[OR67]], [[SHL68]]
-    ; CHECK-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL69:%[0-9]+]]:_(s32) = G_SHL [[COPY16]], [[C6]](s64)
+    ; CHECK-NEXT: [[COPY16:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND79:%[0-9]+]]:_(s32) = G_AND [[COPY16]], [[C7]]
+    ; CHECK-NEXT: [[SHL69:%[0-9]+]]:_(s32) = G_SHL [[AND79]], [[C6]](s64)
     ; CHECK-NEXT: [[OR69:%[0-9]+]]:_(s32) = G_OR [[OR68]], [[SHL69]]
     ; CHECK-NEXT: [[TRUNC10:%[0-9]+]]:_(s8) = G_TRUNC [[OR69]](s32)
-    ; CHECK-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL70:%[0-9]+]]:_(s32) = G_SHL [[COPY17]], [[C]](s64)
-    ; CHECK-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[OR70:%[0-9]+]]:_(s32) = G_OR [[COPY18]], [[SHL70]]
-    ; CHECK-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL71:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C1]](s64)
+    ; CHECK-NEXT: [[COPY17:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND80:%[0-9]+]]:_(s32) = G_AND [[COPY17]], [[C7]]
+    ; CHECK-NEXT: [[SHL70:%[0-9]+]]:_(s32) = G_SHL [[AND80]], [[C]](s64)
+    ; CHECK-NEXT: [[COPY18:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND81:%[0-9]+]]:_(s32) = G_AND [[COPY18]], [[C7]]
+    ; CHECK-NEXT: [[OR70:%[0-9]+]]:_(s32) = G_OR [[AND81]], [[SHL70]]
+    ; CHECK-NEXT: [[COPY19:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND82:%[0-9]+]]:_(s32) = G_AND [[COPY19]], [[C7]]
+    ; CHECK-NEXT: [[SHL71:%[0-9]+]]:_(s32) = G_SHL [[AND82]], [[C1]](s64)
     ; CHECK-NEXT: [[OR71:%[0-9]+]]:_(s32) = G_OR [[OR70]], [[SHL71]]
-    ; CHECK-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL72:%[0-9]+]]:_(s32) = G_SHL [[COPY20]], [[C2]](s64)
+    ; CHECK-NEXT: [[COPY20:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND83:%[0-9]+]]:_(s32) = G_AND [[COPY20]], [[C7]]
+    ; CHECK-NEXT: [[SHL72:%[0-9]+]]:_(s32) = G_SHL [[AND83]], [[C2]](s64)
     ; CHECK-NEXT: [[OR72:%[0-9]+]]:_(s32) = G_OR [[OR71]], [[SHL72]]
-    ; CHECK-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL73:%[0-9]+]]:_(s32) = G_SHL [[COPY21]], [[C3]](s64)
+    ; CHECK-NEXT: [[COPY21:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND84:%[0-9]+]]:_(s32) = G_AND [[COPY21]], [[C7]]
+    ; CHECK-NEXT: [[SHL73:%[0-9]+]]:_(s32) = G_SHL [[AND84]], [[C3]](s64)
     ; CHECK-NEXT: [[OR73:%[0-9]+]]:_(s32) = G_OR [[OR72]], [[SHL73]]
-    ; CHECK-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL74:%[0-9]+]]:_(s32) = G_SHL [[COPY22]], [[C4]](s64)
+    ; CHECK-NEXT: [[COPY22:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND85:%[0-9]+]]:_(s32) = G_AND [[COPY22]], [[C7]]
+    ; CHECK-NEXT: [[SHL74:%[0-9]+]]:_(s32) = G_SHL [[AND85]], [[C4]](s64)
     ; CHECK-NEXT: [[OR74:%[0-9]+]]:_(s32) = G_OR [[OR73]], [[SHL74]]
-    ; CHECK-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL75:%[0-9]+]]:_(s32) = G_SHL [[COPY23]], [[C5]](s64)
+    ; CHECK-NEXT: [[COPY23:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND86:%[0-9]+]]:_(s32) = G_AND [[COPY23]], [[C7]]
+    ; CHECK-NEXT: [[SHL75:%[0-9]+]]:_(s32) = G_SHL [[AND86]], [[C5]](s64)
     ; CHECK-NEXT: [[OR75:%[0-9]+]]:_(s32) = G_OR [[OR74]], [[SHL75]]
-    ; CHECK-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL76:%[0-9]+]]:_(s32) = G_SHL [[COPY24]], [[C6]](s64)
+    ; CHECK-NEXT: [[COPY24:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND87:%[0-9]+]]:_(s32) = G_AND [[COPY24]], [[C7]]
+    ; CHECK-NEXT: [[SHL76:%[0-9]+]]:_(s32) = G_SHL [[AND87]], [[C6]](s64)
     ; CHECK-NEXT: [[OR76:%[0-9]+]]:_(s32) = G_OR [[OR75]], [[SHL76]]
     ; CHECK-NEXT: [[TRUNC11:%[0-9]+]]:_(s8) = G_TRUNC [[OR76]](s32)
-    ; CHECK-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL77:%[0-9]+]]:_(s32) = G_SHL [[COPY25]], [[C]](s64)
-    ; CHECK-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[OR77:%[0-9]+]]:_(s32) = G_OR [[COPY26]], [[SHL77]]
-    ; CHECK-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL78:%[0-9]+]]:_(s32) = G_SHL [[COPY27]], [[C1]](s64)
+    ; CHECK-NEXT: [[COPY25:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND88:%[0-9]+]]:_(s32) = G_AND [[COPY25]], [[C7]]
+    ; CHECK-NEXT: [[SHL77:%[0-9]+]]:_(s32) = G_SHL [[AND88]], [[C]](s64)
+    ; CHECK-NEXT: [[COPY26:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND89:%[0-9]+]]:_(s32) = G_AND [[COPY26]], [[C7]]
+    ; CHECK-NEXT: [[OR77:%[0-9]+]]:_(s32) = G_OR [[AND89]], [[SHL77]]
+    ; CHECK-NEXT: [[COPY27:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND90:%[0-9]+]]:_(s32) = G_AND [[COPY27]], [[C7]]
+    ; CHECK-NEXT: [[SHL78:%[0-9]+]]:_(s32) = G_SHL [[AND90]], [[C1]](s64)
     ; CHECK-NEXT: [[OR78:%[0-9]+]]:_(s32) = G_OR [[OR77]], [[SHL78]]
-    ; CHECK-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL79:%[0-9]+]]:_(s32) = G_SHL [[COPY28]], [[C2]](s64)
+    ; CHECK-NEXT: [[COPY28:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND91:%[0-9]+]]:_(s32) = G_AND [[COPY28]], [[C7]]
+    ; CHECK-NEXT: [[SHL79:%[0-9]+]]:_(s32) = G_SHL [[AND91]], [[C2]](s64)
     ; CHECK-NEXT: [[OR79:%[0-9]+]]:_(s32) = G_OR [[OR78]], [[SHL79]]
-    ; CHECK-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL80:%[0-9]+]]:_(s32) = G_SHL [[COPY29]], [[C3]](s64)
+    ; CHECK-NEXT: [[COPY29:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND92:%[0-9]+]]:_(s32) = G_AND [[COPY29]], [[C7]]
+    ; CHECK-NEXT: [[SHL80:%[0-9]+]]:_(s32) = G_SHL [[AND92]], [[C3]](s64)
     ; CHECK-NEXT: [[OR80:%[0-9]+]]:_(s32) = G_OR [[OR79]], [[SHL80]]
-    ; CHECK-NEXT: [[COPY30:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL81:%[0-9]+]]:_(s32) = G_SHL [[COPY30]], [[C4]](s64)
+    ; CHECK-NEXT: [[COPY30:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND93:%[0-9]+]]:_(s32) = G_AND [[COPY30]], [[C7]]
+    ; CHECK-NEXT: [[SHL81:%[0-9]+]]:_(s32) = G_SHL [[AND93]], [[C4]](s64)
     ; CHECK-NEXT: [[OR81:%[0-9]+]]:_(s32) = G_OR [[OR80]], [[SHL81]]
-    ; CHECK-NEXT: [[COPY31:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL82:%[0-9]+]]:_(s32) = G_SHL [[COPY31]], [[C5]](s64)
+    ; CHECK-NEXT: [[COPY31:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND94:%[0-9]+]]:_(s32) = G_AND [[COPY31]], [[C7]]
+    ; CHECK-NEXT: [[SHL82:%[0-9]+]]:_(s32) = G_SHL [[AND94]], [[C5]](s64)
     ; CHECK-NEXT: [[OR82:%[0-9]+]]:_(s32) = G_OR [[OR81]], [[SHL82]]
-    ; CHECK-NEXT: [[COPY32:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL83:%[0-9]+]]:_(s32) = G_SHL [[COPY32]], [[C6]](s64)
+    ; CHECK-NEXT: [[COPY32:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND95:%[0-9]+]]:_(s32) = G_AND [[COPY32]], [[C7]]
+    ; CHECK-NEXT: [[SHL83:%[0-9]+]]:_(s32) = G_SHL [[AND95]], [[C6]](s64)
     ; CHECK-NEXT: [[OR83:%[0-9]+]]:_(s32) = G_OR [[OR82]], [[SHL83]]
     ; CHECK-NEXT: [[TRUNC12:%[0-9]+]]:_(s8) = G_TRUNC [[OR83]](s32)
-    ; CHECK-NEXT: [[COPY33:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL84:%[0-9]+]]:_(s32) = G_SHL [[COPY33]], [[C]](s64)
-    ; CHECK-NEXT: [[COPY34:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[OR84:%[0-9]+]]:_(s32) = G_OR [[COPY34]], [[SHL84]]
-    ; CHECK-NEXT: [[COPY35:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL85:%[0-9]+]]:_(s32) = G_SHL [[COPY35]], [[C1]](s64)
+    ; CHECK-NEXT: [[COPY33:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND96:%[0-9]+]]:_(s32) = G_AND [[COPY33]], [[C7]]
+    ; CHECK-NEXT: [[SHL84:%[0-9]+]]:_(s32) = G_SHL [[AND96]], [[C]](s64)
+    ; CHECK-NEXT: [[COPY34:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND97:%[0-9]+]]:_(s32) = G_AND [[COPY34]], [[C7]]
+    ; CHECK-NEXT: [[OR84:%[0-9]+]]:_(s32) = G_OR [[AND97]], [[SHL84]]
+    ; CHECK-NEXT: [[COPY35:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND98:%[0-9]+]]:_(s32) = G_AND [[COPY35]], [[C7]]
+    ; CHECK-NEXT: [[SHL85:%[0-9]+]]:_(s32) = G_SHL [[AND98]], [[C1]](s64)
     ; CHECK-NEXT: [[OR85:%[0-9]+]]:_(s32) = G_OR [[OR84]], [[SHL85]]
-    ; CHECK-NEXT: [[COPY36:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL86:%[0-9]+]]:_(s32) = G_SHL [[COPY36]], [[C2]](s64)
+    ; CHECK-NEXT: [[COPY36:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND99:%[0-9]+]]:_(s32) = G_AND [[COPY36]], [[C7]]
+    ; CHECK-NEXT: [[SHL86:%[0-9]+]]:_(s32) = G_SHL [[AND99]], [[C2]](s64)
     ; CHECK-NEXT: [[OR86:%[0-9]+]]:_(s32) = G_OR [[OR85]], [[SHL86]]
-    ; CHECK-NEXT: [[COPY37:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL87:%[0-9]+]]:_(s32) = G_SHL [[COPY37]], [[C3]](s64)
+    ; CHECK-NEXT: [[COPY37:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND100:%[0-9]+]]:_(s32) = G_AND [[COPY37]], [[C7]]
+    ; CHECK-NEXT: [[SHL87:%[0-9]+]]:_(s32) = G_SHL [[AND100]], [[C3]](s64)
     ; CHECK-NEXT: [[OR87:%[0-9]+]]:_(s32) = G_OR [[OR86]], [[SHL87]]
-    ; CHECK-NEXT: [[COPY38:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL88:%[0-9]+]]:_(s32) = G_SHL [[COPY38]], [[C4]](s64)
+    ; CHECK-NEXT: [[COPY38:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND101:%[0-9]+]]:_(s32) = G_AND [[COPY38]], [[C7]]
+    ; CHECK-NEXT: [[SHL88:%[0-9]+]]:_(s32) = G_SHL [[AND101]], [[C4]](s64)
     ; CHECK-NEXT: [[OR88:%[0-9]+]]:_(s32) = G_OR [[OR87]], [[SHL88]]
-    ; CHECK-NEXT: [[COPY39:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL89:%[0-9]+]]:_(s32) = G_SHL [[COPY39]], [[C5]](s64)
+    ; CHECK-NEXT: [[COPY39:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND102:%[0-9]+]]:_(s32) = G_AND [[COPY39]], [[C7]]
+    ; CHECK-NEXT: [[SHL89:%[0-9]+]]:_(s32) = G_SHL [[AND102]], [[C5]](s64)
     ; CHECK-NEXT: [[OR89:%[0-9]+]]:_(s32) = G_OR [[OR88]], [[SHL89]]
-    ; CHECK-NEXT: [[COPY40:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL90:%[0-9]+]]:_(s32) = G_SHL [[COPY40]], [[C6]](s64)
+    ; CHECK-NEXT: [[COPY40:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND103:%[0-9]+]]:_(s32) = G_AND [[COPY40]], [[C7]]
+    ; CHECK-NEXT: [[SHL90:%[0-9]+]]:_(s32) = G_SHL [[AND103]], [[C6]](s64)
     ; CHECK-NEXT: [[OR90:%[0-9]+]]:_(s32) = G_OR [[OR89]], [[SHL90]]
     ; CHECK-NEXT: [[TRUNC13:%[0-9]+]]:_(s8) = G_TRUNC [[OR90]](s32)
-    ; CHECK-NEXT: [[COPY41:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL91:%[0-9]+]]:_(s32) = G_SHL [[COPY41]], [[C]](s64)
-    ; CHECK-NEXT: [[COPY42:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[OR91:%[0-9]+]]:_(s32) = G_OR [[COPY42]], [[SHL91]]
-    ; CHECK-NEXT: [[COPY43:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL92:%[0-9]+]]:_(s32) = G_SHL [[COPY43]], [[C1]](s64)
+    ; CHECK-NEXT: [[COPY41:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND104:%[0-9]+]]:_(s32) = G_AND [[COPY41]], [[C7]]
+    ; CHECK-NEXT: [[SHL91:%[0-9]+]]:_(s32) = G_SHL [[AND104]], [[C]](s64)
+    ; CHECK-NEXT: [[COPY42:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND105:%[0-9]+]]:_(s32) = G_AND [[COPY42]], [[C7]]
+    ; CHECK-NEXT: [[OR91:%[0-9]+]]:_(s32) = G_OR [[AND105]], [[SHL91]]
+    ; CHECK-NEXT: [[COPY43:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND106:%[0-9]+]]:_(s32) = G_AND [[COPY43]], [[C7]]
+    ; CHECK-NEXT: [[SHL92:%[0-9]+]]:_(s32) = G_SHL [[AND106]], [[C1]](s64)
     ; CHECK-NEXT: [[OR92:%[0-9]+]]:_(s32) = G_OR [[OR91]], [[SHL92]]
-    ; CHECK-NEXT: [[COPY44:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL93:%[0-9]+]]:_(s32) = G_SHL [[COPY44]], [[C2]](s64)
+    ; CHECK-NEXT: [[COPY44:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND107:%[0-9]+]]:_(s32) = G_AND [[COPY44]], [[C7]]
+    ; CHECK-NEXT: [[SHL93:%[0-9]+]]:_(s32) = G_SHL [[AND107]], [[C2]](s64)
     ; CHECK-NEXT: [[OR93:%[0-9]+]]:_(s32) = G_OR [[OR92]], [[SHL93]]
-    ; CHECK-NEXT: [[COPY45:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL94:%[0-9]+]]:_(s32) = G_SHL [[COPY45]], [[C3]](s64)
+    ; CHECK-NEXT: [[COPY45:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND108:%[0-9]+]]:_(s32) = G_AND [[COPY45]], [[C7]]
+    ; CHECK-NEXT: [[SHL94:%[0-9]+]]:_(s32) = G_SHL [[AND108]], [[C3]](s64)
     ; CHECK-NEXT: [[OR94:%[0-9]+]]:_(s32) = G_OR [[OR93]], [[SHL94]]
-    ; CHECK-NEXT: [[COPY46:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL95:%[0-9]+]]:_(s32) = G_SHL [[COPY46]], [[C4]](s64)
+    ; CHECK-NEXT: [[COPY46:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND109:%[0-9]+]]:_(s32) = G_AND [[COPY46]], [[C7]]
+    ; CHECK-NEXT: [[SHL95:%[0-9]+]]:_(s32) = G_SHL [[AND109]], [[C4]](s64)
     ; CHECK-NEXT: [[OR95:%[0-9]+]]:_(s32) = G_OR [[OR94]], [[SHL95]]
-    ; CHECK-NEXT: [[COPY47:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL96:%[0-9]+]]:_(s32) = G_SHL [[COPY47]], [[C5]](s64)
+    ; CHECK-NEXT: [[COPY47:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND110:%[0-9]+]]:_(s32) = G_AND [[COPY47]], [[C7]]
+    ; CHECK-NEXT: [[SHL96:%[0-9]+]]:_(s32) = G_SHL [[AND110]], [[C5]](s64)
     ; CHECK-NEXT: [[OR96:%[0-9]+]]:_(s32) = G_OR [[OR95]], [[SHL96]]
-    ; CHECK-NEXT: [[COPY48:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL97:%[0-9]+]]:_(s32) = G_SHL [[COPY48]], [[C6]](s64)
+    ; CHECK-NEXT: [[COPY48:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND111:%[0-9]+]]:_(s32) = G_AND [[COPY48]], [[C7]]
+    ; CHECK-NEXT: [[SHL97:%[0-9]+]]:_(s32) = G_SHL [[AND111]], [[C6]](s64)
     ; CHECK-NEXT: [[OR97:%[0-9]+]]:_(s32) = G_OR [[OR96]], [[SHL97]]
     ; CHECK-NEXT: [[TRUNC14:%[0-9]+]]:_(s8) = G_TRUNC [[OR97]](s32)
-    ; CHECK-NEXT: [[COPY49:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL98:%[0-9]+]]:_(s32) = G_SHL [[COPY49]], [[C]](s64)
-    ; CHECK-NEXT: [[COPY50:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[OR98:%[0-9]+]]:_(s32) = G_OR [[COPY50]], [[SHL98]]
-    ; CHECK-NEXT: [[COPY51:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL99:%[0-9]+]]:_(s32) = G_SHL [[COPY51]], [[C1]](s64)
+    ; CHECK-NEXT: [[COPY49:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND112:%[0-9]+]]:_(s32) = G_AND [[COPY49]], [[C7]]
+    ; CHECK-NEXT: [[SHL98:%[0-9]+]]:_(s32) = G_SHL [[AND112]], [[C]](s64)
+    ; CHECK-NEXT: [[COPY50:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND113:%[0-9]+]]:_(s32) = G_AND [[COPY50]], [[C7]]
+    ; CHECK-NEXT: [[OR98:%[0-9]+]]:_(s32) = G_OR [[AND113]], [[SHL98]]
+    ; CHECK-NEXT: [[COPY51:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND114:%[0-9]+]]:_(s32) = G_AND [[COPY51]], [[C7]]
+    ; CHECK-NEXT: [[SHL99:%[0-9]+]]:_(s32) = G_SHL [[AND114]], [[C1]](s64)
     ; CHECK-NEXT: [[OR99:%[0-9]+]]:_(s32) = G_OR [[OR98]], [[SHL99]]
-    ; CHECK-NEXT: [[COPY52:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL100:%[0-9]+]]:_(s32) = G_SHL [[COPY52]], [[C2]](s64)
+    ; CHECK-NEXT: [[COPY52:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND115:%[0-9]+]]:_(s32) = G_AND [[COPY52]], [[C7]]
+    ; CHECK-NEXT: [[SHL100:%[0-9]+]]:_(s32) = G_SHL [[AND115]], [[C2]](s64)
     ; CHECK-NEXT: [[OR100:%[0-9]+]]:_(s32) = G_OR [[OR99]], [[SHL100]]
-    ; CHECK-NEXT: [[COPY53:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL101:%[0-9]+]]:_(s32) = G_SHL [[COPY53]], [[C3]](s64)
+    ; CHECK-NEXT: [[COPY53:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND116:%[0-9]+]]:_(s32) = G_AND [[COPY53]], [[C7]]
+    ; CHECK-NEXT: [[SHL101:%[0-9]+]]:_(s32) = G_SHL [[AND116]], [[C3]](s64)
     ; CHECK-NEXT: [[OR101:%[0-9]+]]:_(s32) = G_OR [[OR100]], [[SHL101]]
-    ; CHECK-NEXT: [[COPY54:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL102:%[0-9]+]]:_(s32) = G_SHL [[COPY54]], [[C4]](s64)
+    ; CHECK-NEXT: [[COPY54:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND117:%[0-9]+]]:_(s32) = G_AND [[COPY54]], [[C7]]
+    ; CHECK-NEXT: [[SHL102:%[0-9]+]]:_(s32) = G_SHL [[AND117]], [[C4]](s64)
     ; CHECK-NEXT: [[OR102:%[0-9]+]]:_(s32) = G_OR [[OR101]], [[SHL102]]
-    ; CHECK-NEXT: [[COPY55:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL103:%[0-9]+]]:_(s32) = G_SHL [[COPY55]], [[C5]](s64)
+    ; CHECK-NEXT: [[COPY55:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND118:%[0-9]+]]:_(s32) = G_AND [[COPY55]], [[C7]]
+    ; CHECK-NEXT: [[SHL103:%[0-9]+]]:_(s32) = G_SHL [[AND118]], [[C5]](s64)
     ; CHECK-NEXT: [[OR103:%[0-9]+]]:_(s32) = G_OR [[OR102]], [[SHL103]]
-    ; CHECK-NEXT: [[COPY56:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL104:%[0-9]+]]:_(s32) = G_SHL [[COPY56]], [[C6]](s64)
+    ; CHECK-NEXT: [[COPY56:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND119:%[0-9]+]]:_(s32) = G_AND [[COPY56]], [[C7]]
+    ; CHECK-NEXT: [[SHL104:%[0-9]+]]:_(s32) = G_SHL [[AND119]], [[C6]](s64)
     ; CHECK-NEXT: [[OR104:%[0-9]+]]:_(s32) = G_OR [[OR103]], [[SHL104]]
     ; CHECK-NEXT: [[TRUNC15:%[0-9]+]]:_(s8) = G_TRUNC [[OR104]](s32)
-    ; CHECK-NEXT: [[SHL105:%[0-9]+]]:_(s32) = G_SHL [[C8]], [[C]](s64)
-    ; CHECK-NEXT: [[COPY57:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[OR105:%[0-9]+]]:_(s32) = G_OR [[COPY57]], [[SHL105]]
-    ; CHECK-NEXT: [[COPY58:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL106:%[0-9]+]]:_(s32) = G_SHL [[COPY58]], [[C1]](s64)
+    ; CHECK-NEXT: [[AND120:%[0-9]+]]:_(s32) = G_AND [[DEF]], [[C7]]
+    ; CHECK-NEXT: [[SHL105:%[0-9]+]]:_(s32) = G_SHL [[AND120]], [[C]](s64)
+    ; CHECK-NEXT: [[COPY57:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND121:%[0-9]+]]:_(s32) = G_AND [[COPY57]], [[C7]]
+    ; CHECK-NEXT: [[OR105:%[0-9]+]]:_(s32) = G_OR [[AND121]], [[SHL105]]
+    ; CHECK-NEXT: [[COPY58:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND122:%[0-9]+]]:_(s32) = G_AND [[COPY58]], [[C7]]
+    ; CHECK-NEXT: [[SHL106:%[0-9]+]]:_(s32) = G_SHL [[AND122]], [[C1]](s64)
     ; CHECK-NEXT: [[OR106:%[0-9]+]]:_(s32) = G_OR [[OR105]], [[SHL106]]
-    ; CHECK-NEXT: [[COPY59:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL107:%[0-9]+]]:_(s32) = G_SHL [[COPY59]], [[C2]](s64)
+    ; CHECK-NEXT: [[COPY59:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND123:%[0-9]+]]:_(s32) = G_AND [[COPY59]], [[C7]]
+    ; CHECK-NEXT: [[SHL107:%[0-9]+]]:_(s32) = G_SHL [[AND123]], [[C2]](s64)
     ; CHECK-NEXT: [[OR107:%[0-9]+]]:_(s32) = G_OR [[OR106]], [[SHL107]]
-    ; CHECK-NEXT: [[COPY60:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL108:%[0-9]+]]:_(s32) = G_SHL [[COPY60]], [[C3]](s64)
+    ; CHECK-NEXT: [[COPY60:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND124:%[0-9]+]]:_(s32) = G_AND [[COPY60]], [[C7]]
+    ; CHECK-NEXT: [[SHL108:%[0-9]+]]:_(s32) = G_SHL [[AND124]], [[C3]](s64)
     ; CHECK-NEXT: [[OR108:%[0-9]+]]:_(s32) = G_OR [[OR107]], [[SHL108]]
-    ; CHECK-NEXT: [[COPY61:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL109:%[0-9]+]]:_(s32) = G_SHL [[COPY61]], [[C4]](s64)
+    ; CHECK-NEXT: [[COPY61:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND125:%[0-9]+]]:_(s32) = G_AND [[COPY61]], [[C7]]
+    ; CHECK-NEXT: [[SHL109:%[0-9]+]]:_(s32) = G_SHL [[AND125]], [[C4]](s64)
     ; CHECK-NEXT: [[OR109:%[0-9]+]]:_(s32) = G_OR [[OR108]], [[SHL109]]
-    ; CHECK-NEXT: [[COPY62:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL110:%[0-9]+]]:_(s32) = G_SHL [[COPY62]], [[C5]](s64)
+    ; CHECK-NEXT: [[COPY62:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND126:%[0-9]+]]:_(s32) = G_AND [[COPY62]], [[C7]]
+    ; CHECK-NEXT: [[SHL110:%[0-9]+]]:_(s32) = G_SHL [[AND126]], [[C5]](s64)
     ; CHECK-NEXT: [[OR110:%[0-9]+]]:_(s32) = G_OR [[OR109]], [[SHL110]]
-    ; CHECK-NEXT: [[COPY63:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
-    ; CHECK-NEXT: [[SHL111:%[0-9]+]]:_(s32) = G_SHL [[COPY63]], [[C6]](s64)
+    ; CHECK-NEXT: [[COPY63:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+    ; CHECK-NEXT: [[AND127:%[0-9]+]]:_(s32) = G_AND [[COPY63]], [[C7]]
+    ; CHECK-NEXT: [[SHL111:%[0-9]+]]:_(s32) = G_SHL [[AND127]], [[C6]](s64)
     ; CHECK-NEXT: [[OR111:%[0-9]+]]:_(s32) = G_OR [[OR110]], [[SHL111]]
     ; CHECK-NEXT: [[TRUNC16:%[0-9]+]]:_(s8) = G_TRUNC [[OR111]](s32)
     ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[TRUNC9]](s8), [[TRUNC10]](s8), [[TRUNC11]](s8), [[TRUNC12]](s8), [[TRUNC13]](s8), [[TRUNC14]](s8), [[TRUNC15]](s8), [[TRUNC16]](s8)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-intrinsic-min-max.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-intrinsic-min-max.mir
index 505a5a61be97..3ad54ba3f17f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-intrinsic-min-max.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-intrinsic-min-max.mir
@@ -18,8 +18,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY]](s32), [[COPY1]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[COPY]], [[COPY1]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
@@ -46,8 +47,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(slt), [[COPY]](s64), [[COPY1]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[COPY]], [[COPY1]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
@@ -74,8 +76,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY]](s32), [[COPY1]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[COPY]], [[COPY1]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
@@ -102,8 +105,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), [[COPY]](s64), [[COPY1]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[COPY]], [[COPY1]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
@@ -132,8 +136,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY1]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[COPY]], [[COPY1]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
@@ -160,8 +165,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[COPY]], [[COPY1]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
@@ -188,8 +194,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY]](s32), [[COPY1]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[COPY]], [[COPY1]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
@@ -216,8 +223,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY]](s64), [[COPY1]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[COPY]], [[COPY1]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
index 66850abc05e6..d0c65c9fbd35 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-itofp.mir
@@ -245,8 +245,7 @@ body: |
     liveins: $q0
     ; CHECK-LABEL: name: test_uitofp_v2s64_v2i1
     ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[COPY]](s8), [[DEF]](s8)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8)
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
     ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<2 x s64>) = G_ANYEXT [[BUILD_VECTOR]](<2 x s8>)
@@ -265,8 +264,7 @@ body: |
     liveins: $q0
     ; CHECK-LABEL: name: test_sitofp_v2s64_v2i1
     ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s8) = COPY [[DEF]](s8)
-    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[COPY]](s8), [[DEF]](s8)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8)
     ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<2 x s64>) = G_ANYEXT [[BUILD_VECTOR]](<2 x s8>)
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
     ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi-insertpt-decrement.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi-insertpt-decrement.mir
index 9c682cd50e27..60d7af273eb5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi-insertpt-decrement.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi-insertpt-decrement.mir
@@ -51,27 +51,30 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(p0) = G_PHI %6(p0), %bb.2, [[DEF]](p0), %bb.0
-  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s16) = G_PHI %20(s16), %bb.2, [[DEF1]](s16), %bb.0
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[PHI1]](s16)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.3
+  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s16) = G_PHI %22(s16), %bb.2, [[DEF1]](s16), %bb.0
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI1]](s16)
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2.bb3:
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.1(0x40000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
-  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[INTTOPTR]], [[C1]](s64)
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[INTTOPTR]], [[C2]](s64)
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[PHI]](p0) :: (load (s16) from %ir.lsr.iv)
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
-  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
-  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ZEXT]](s32), [[C2]]
-  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
+  ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+  ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ZEXT]](s32), [[C3]]
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD]](p0) :: (load (s16) from %ir.tmp5)
   ; CHECK-NEXT:   [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY [[C3]](s32)
   ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[ZEXT1]](s32), [[COPY]]
-  ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PHI]], [[C1]](s64)
-  ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ICMP1]](s32)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC1]](s1), %bb.3
+  ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PHI]], [[C2]](s64)
+  ; CHECK-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C4]]
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ICMP1]](s32)
+  ; CHECK-NEXT:   G_BRCOND [[AND1]](s32), %bb.3
   ; CHECK-NEXT:   G_BR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3.bb10:

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir
index 10a85292c2f2..3c952cb779d2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-phi.mir
@@ -32,29 +32,29 @@ body:             |
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY]](s32), [[C]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C1]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C1]]
-  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
   ; CHECK-NEXT:   G_BR %bb.3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C2]]
-  ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ADD1]](s32)
+  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ADD1]](s32)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3:
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC1]](s16), %bb.1, [[TRUNC2]](s16), %bb.2
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC]](s16), %bb.1, [[TRUNC1]](s16), %bb.2
   ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
-  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
-  ; CHECK-NEXT:   $w0 = COPY [[AND]](s32)
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
+  ; CHECK-NEXT:   $w0 = COPY [[AND1]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
   bb.0:
    ; Test that we insert legalization artifacts(Truncs here) into the correct BBs
@@ -118,8 +118,9 @@ body:             |
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $w2
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY2]](s32)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -183,29 +184,29 @@ body:             |
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
   ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY]](s32), [[C]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C1]]
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C2]]
-  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ADD1]](s32)
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ADD1]](s32)
   ; CHECK-NEXT:   G_BR %bb.3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
+  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3:
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC1]](s16), %bb.1, [[TRUNC2]](s16), %bb.2
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC]](s16), %bb.1, [[TRUNC1]](s16), %bb.2
   ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
-  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
-  ; CHECK-NEXT:   $w0 = COPY [[AND]](s32)
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
+  ; CHECK-NEXT:   $w0 = COPY [[AND1]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
   bb.0:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
@@ -273,21 +274,21 @@ body:             |
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[C]](s16), %bb.0, %13(s16), %bb.1
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[C]](s16), %bb.0, %14(s16), %bb.1
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ANYEXT]], [[C1]]
   ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
   ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[AND]](s32), [[COPY]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C1]]
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
+  ; CHECK-NEXT:   G_BRCOND [[AND1]](s32), %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
-  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C3]]
-  ; CHECK-NEXT:   $w0 = COPY [[AND1]](s32)
+  ; CHECK-NEXT:   [[AND2:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C3]]
+  ; CHECK-NEXT:   $w0 = COPY [[AND2]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
   bb.0:
     successors: %bb.1(0x80000000)
@@ -345,8 +346,9 @@ body:             |
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
   ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C1]]
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[AND]](s32), [[COPY]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+  ; CHECK-NEXT:   G_BRCOND [[AND1]](s32), %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   $w0 = COPY [[AND]](s32)
@@ -409,34 +411,34 @@ body:             |
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
   ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY]](s32), [[C]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C1]]
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C2]]
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ADD1]](s32)
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ADD1]](s32)
-  ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ADD1]](s32)
   ; CHECK-NEXT:   G_BR %bb.3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 42
-  ; CHECK-NEXT:   [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
+  ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3:
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC2]](s16), %bb.1, [[TRUNC3]](s16), %bb.2
-  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s16) = G_PHI [[TRUNC1]](s16), %bb.1, [[C3]](s16), %bb.2
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC1]](s16), %bb.1, [[TRUNC2]](s16), %bb.2
+  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s16) = G_PHI [[TRUNC]](s16), %bb.1, [[C3]](s16), %bb.2
   ; CHECK-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
-  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C4]]
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C4]]
   ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI1]](s16)
-  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C4]]
-  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[AND]], [[AND1]]
+  ; CHECK-NEXT:   [[AND2:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C4]]
+  ; CHECK-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[AND2]]
   ; CHECK-NEXT:   $w0 = COPY [[ADD2]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
   bb.0:
@@ -519,33 +521,34 @@ body:             |
   ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 44
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[COPY]](s32), [[C]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[C1]]
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
-  ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ADD]](s32)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC2]](s16), %bb.0, %21(s16), %bb.1
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC1]](s16), %bb.0, %22(s16), %bb.1
   ; CHECK-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
-  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C4]]
-  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[AND]], [[C2]]
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C4]]
+  ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[AND1]], [[C2]]
   ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ugt), [[ADD1]](s32), [[C3]]
-  ; CHECK-NEXT:   [[TRUNC3:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
-  ; CHECK-NEXT:   [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 43
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC3]](s1), %bb.2
+  ; CHECK-NEXT:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND2:%[0-9]+]]:_(s32) = G_AND [[ICMP1]], [[C5]]
+  ; CHECK-NEXT:   [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 43
+  ; CHECK-NEXT:   G_BRCOND [[AND2]](s32), %bb.2
   ; CHECK-NEXT:   G_BR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
-  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s16) = G_PHI [[PHI]](s16), %bb.1, [[TRUNC1]](s16), %bb.0
-  ; CHECK-NEXT:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s16) = G_PHI [[PHI]](s16), %bb.1, [[TRUNC]](s16), %bb.0
+  ; CHECK-NEXT:   [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
   ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI1]](s16)
-  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C6]]
-  ; CHECK-NEXT:   $w0 = COPY [[AND1]](s32)
+  ; CHECK-NEXT:   [[AND3:%[0-9]+]]:_(s32) = G_AND [[ANYEXT1]], [[C7]]
+  ; CHECK-NEXT:   $w0 = COPY [[AND3]](s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
   bb.0:
     successors: %bb.1(0x40000000), %bb.3(0x40000000)
@@ -668,23 +671,25 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; CHECK-NEXT:   liveins: $x0
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %cond:_(s1) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.1
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[DEF]], [[C]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_BR %bb.3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.3(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[DEF2:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3:
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s64) = G_PHI [[DEF]](s64), %bb.1, [[DEF1]](s64), %bb.2
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s64) = G_PHI [[DEF1]](s64), %bb.1, [[DEF2]](s64), %bb.2
   ; CHECK-NEXT:   $x0 = COPY [[PHI]](s64)
   ; CHECK-NEXT:   RET_ReallyLR implicit $x0
   bb.0:
@@ -718,20 +723,22 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %ptr1:_(p0) = COPY $x1
   ; CHECK-NEXT:   %ptr2:_(p0) = COPY $x0
-  ; CHECK-NEXT:   %cond:_(s1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr1(p0) :: (load (<2 x s64>), align 32)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr1, [[C]](s64)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.2
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[DEF]], [[C1]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.2
   ; CHECK-NEXT:   G_BR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr2(p0) :: (load (<2 x s64>), align 32)
-  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-  ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr2, [[C1]](s64)
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+  ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr2, [[C2]](s64)
   ; CHECK-NEXT:   [[LOAD3:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<2 x s64>) from unknown-address + 16)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
@@ -769,10 +776,12 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x50000000), %bb.2(0x30000000)
   ; CHECK-NEXT:   liveins: $x0, $x1
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %cond:_(s1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.2
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY [[DEF]](s32)
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.2
   ; CHECK-NEXT:   G_BR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -783,8 +792,8 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(<4 x s32>) = G_PHI [[BUILD_VECTOR1]](<4 x s32>), %bb.1, [[BUILD_VECTOR]](<4 x s32>), %bb.0
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-  ; CHECK-NEXT:   %extract:_(s32) = G_EXTRACT_VECTOR_ELT [[PHI]](<4 x s32>), [[C]](s64)
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; CHECK-NEXT:   %extract:_(s32) = G_EXTRACT_VECTOR_ELT [[PHI]](<4 x s32>), [[C1]](s64)
   ; CHECK-NEXT:   $w0 = COPY %extract(s32)
   ; CHECK-NEXT:   RET_ReallyLR implicit $w0
   bb.0:
@@ -813,22 +822,24 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x50000000), %bb.2(0x30000000)
   ; CHECK-NEXT:   liveins: $x0, $x1
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %cond:_(s1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.2
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[DEF1]], [[C]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.2
   ; CHECK-NEXT:   G_BR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF1]](s16), [[DEF1]](s16), [[DEF1]](s16), [[DEF1]](s16), [[DEF1]](s16), [[DEF1]](s16), [[DEF1]](s16), [[DEF1]](s16)
+  ; CHECK-NEXT:   [[DEF2:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF2]](s16), [[DEF2]](s16), [[DEF2]](s16), [[DEF2]](s16), [[DEF2]](s16), [[DEF2]](s16), [[DEF2]](s16), [[DEF2]](s16)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(<8 x s16>) = G_PHI [[BUILD_VECTOR1]](<8 x s16>), %bb.1, [[BUILD_VECTOR]](<8 x s16>), %bb.0
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-  ; CHECK-NEXT:   %extract:_(s16) = G_EXTRACT_VECTOR_ELT [[PHI]](<8 x s16>), [[C]](s64)
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
+  ; CHECK-NEXT:   %extract:_(s16) = G_EXTRACT_VECTOR_ELT [[PHI]](<8 x s16>), [[C1]](s64)
   ; CHECK-NEXT:   $h0 = COPY %extract(s16)
   ; CHECK-NEXT:   RET_ReallyLR implicit $h0
   bb.0:
@@ -902,14 +913,16 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %ptr1:_(p0) = COPY $x1
   ; CHECK-NEXT:   %ptr2:_(p0) = COPY $x0
-  ; CHECK-NEXT:   %cond:_(s1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr1(p0) :: (load (<2 x s64>), align 32)
   ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD]](<2 x s64>)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr1, [[C]](s64)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD]](p0) :: (load (<2 x s64>) from unknown-address + 16)
   ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD1]](<2 x s64>)
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.2
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[DEF]], [[C1]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.2
   ; CHECK-NEXT:   G_BR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -917,8 +930,8 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(<2 x s64>) = G_LOAD %ptr2(p0) :: (load (<2 x s64>), align 32)
   ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD2]](<2 x s64>)
-  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-  ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr2, [[C1]](s64)
+  ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+  ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD %ptr2, [[C2]](s64)
   ; CHECK-NEXT:   [[LOAD3:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[PTR_ADD1]](p0) :: (load (<2 x s64>) from unknown-address + 16)
   ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[LOAD3]](<2 x s64>)
   ; CHECK-NEXT: {{  $}}

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sadde.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sadde.mir
index bed3f33c7b4a..28ddb9958361 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sadde.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sadde.mir
@@ -11,12 +11,15 @@ body:             |
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY4]](s64)
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY]], [[COPY2]], %carry_in
-    ; CHECK-NEXT: %19:_(s64), %carry_out:_(s1) = G_SADDE [[COPY1]], [[COPY3]], [[UADDE1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY]], [[COPY2]], %24
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
+    ; CHECK-NEXT: [[SADDE:%[0-9]+]]:_(s64), [[SADDE1:%[0-9]+]]:_(s32) = G_SADDE [[COPY1]], [[COPY3]], %22
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UADDE1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[SADDE1]](s32)
     ; CHECK-NEXT: $x0 = COPY [[UADDE]](s64)
-    ; CHECK-NEXT: $x1 = COPY %19(s64)
+    ; CHECK-NEXT: $x1 = COPY [[SADDE]](s64)
     ; CHECK-NEXT: $x2 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -44,14 +47,18 @@ body:             |
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY4]](s64)
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY]], [[COPY1]], %carry_in
-    ; CHECK-NEXT: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY2]], [[UADDE1]]
-    ; CHECK-NEXT: %24:_(s64), %carry_out:_(s1) = G_SADDE [[COPY2]], [[COPY3]], [[UADDE3]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY]], [[COPY1]], %31
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
+    ; CHECK-NEXT: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s32) = G_UADDE [[COPY1]], [[COPY2]], %29
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UADDE1]], [[C]]
+    ; CHECK-NEXT: [[SADDE:%[0-9]+]]:_(s64), [[SADDE1:%[0-9]+]]:_(s32) = G_SADDE [[COPY2]], [[COPY3]], %27
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UADDE3]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[SADDE1]](s32)
     ; CHECK-NEXT: $x0 = COPY [[UADDE]](s64)
     ; CHECK-NEXT: $x1 = COPY [[UADDE2]](s64)
-    ; CHECK-NEXT: $x2 = COPY %24(s64)
+    ; CHECK-NEXT: $x2 = COPY [[SADDE]](s64)
     ; CHECK-NEXT: $x3 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -78,12 +85,14 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY2]](s64)
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC]], 8
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC1]], 8
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[SEXT_INREG]], [[SEXT_INREG1]], %carry_in
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[SEXT_INREG]], [[SEXT_INREG1]], %18
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UADDE]], 8
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UADDE]](s32), [[SEXT_INREG2]]
     ; CHECK-NEXT: %add_ext:_(s64) = G_ANYEXT [[UADDE]](s32)
@@ -111,11 +120,11 @@ body:             |
     ; CHECK: %lhs:_(s32) = COPY $w0
     ; CHECK-NEXT: %rhs:_(s32) = COPY $w1
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w2
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY]](s32)
-    ; CHECK-NEXT: %add:_(s32), %carry_out:_(s1) = G_SADDE %lhs, %rhs, %carry_in
-    ; CHECK-NEXT: %carry_out_ext:_(s32) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: %add:_(s32), %7:_(s32) = G_SADDE %lhs, %rhs, %8
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
     ; CHECK-NEXT: $w0 = COPY %add(s32)
-    ; CHECK-NEXT: $w1 = COPY %carry_out_ext(s32)
+    ; CHECK-NEXT: $w1 = COPY %7(s32)
     %lhs:_(s32) = COPY $w0
     %rhs:_(s32) = COPY $w1
     %2:_(s32) = COPY $w2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-saddo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-saddo.mir
index 7d5cb7ffa683..c9462ff3ed4b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-saddo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-saddo.mir
@@ -10,11 +10,13 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY2]]
-    ; CHECK-NEXT: %17:_(s64), %carry_out:_(s1) = G_SADDE [[COPY1]], [[COPY3]], [[UADDO1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[COPY]], [[COPY2]]
+    ; CHECK-NEXT: [[SADDE:%[0-9]+]]:_(s64), [[SADDE1:%[0-9]+]]:_(s32) = G_SADDE [[COPY1]], [[COPY3]], %20
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[SADDE1]](s32)
     ; CHECK-NEXT: $x0 = COPY [[UADDO]](s64)
-    ; CHECK-NEXT: $x1 = COPY %17(s64)
+    ; CHECK-NEXT: $x1 = COPY [[SADDE]](s64)
     ; CHECK-NEXT: $x2 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -39,13 +41,16 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY2]], [[UADDO1]]
-    ; CHECK-NEXT: %22:_(s64), %carry_out:_(s1) = G_SADDE [[COPY2]], [[COPY3]], [[UADDE1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY1]], [[COPY2]], %27
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C]]
+    ; CHECK-NEXT: [[SADDE:%[0-9]+]]:_(s64), [[SADDE1:%[0-9]+]]:_(s32) = G_SADDE [[COPY2]], [[COPY3]], %25
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UADDE1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[SADDE1]](s32)
     ; CHECK-NEXT: $x0 = COPY [[UADDO]](s64)
     ; CHECK-NEXT: $x1 = COPY [[UADDE]](s64)
-    ; CHECK-NEXT: $x2 = COPY %22(s64)
+    ; CHECK-NEXT: $x2 = COPY [[SADDE]](s64)
     ; CHECK-NEXT: $x3 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -98,10 +103,9 @@ body:             |
     ; CHECK-LABEL: name: test_scalar_saddo_32
     ; CHECK: %lhs:_(s32) = COPY $w0
     ; CHECK-NEXT: %rhs:_(s32) = COPY $w1
-    ; CHECK-NEXT: %add:_(s32), %carry_out:_(s1) = G_SADDO %lhs, %rhs
-    ; CHECK-NEXT: %carry_out_ext:_(s32) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: %add:_(s32), %5:_(s32) = G_SADDO %lhs, %rhs
     ; CHECK-NEXT: $w0 = COPY %add(s32)
-    ; CHECK-NEXT: $w1 = COPY %carry_out_ext(s32)
+    ; CHECK-NEXT: $w1 = COPY %5(s32)
     %lhs:_(s32) = COPY $w0
     %rhs:_(s32) = COPY $w1
     %add:_(s32), %carry_out:_(s1) = G_SADDO %lhs, %rhs

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-saddsat.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-saddsat.mir
index cfce6fd39022..78e3cb6298ff 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-saddsat.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-saddsat.mir
@@ -13,12 +13,14 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %x:_(s32) = COPY $w0
     ; CHECK-NEXT: %y:_(s32) = COPY $w1
-    ; CHECK-NEXT: [[SADDO:%[0-9]+]]:_(s32), [[SADDO1:%[0-9]+]]:_(s1) = G_SADDO %x, %y
+    ; CHECK-NEXT: [[SADDO:%[0-9]+]]:_(s32), [[SADDO1:%[0-9]+]]:_(s32) = G_SADDO %x, %y
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SADDO]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: %saddsat:_(s32) = G_SELECT [[SADDO1]](s1), [[ADD]], [[SADDO]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SADDO1]], [[C2]]
+    ; CHECK-NEXT: %saddsat:_(s32) = G_SELECT [[AND]](s32), [[ADD]], [[SADDO]]
     ; CHECK-NEXT: $w0 = COPY %saddsat(s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
@@ -39,12 +41,14 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %x:_(s64) = COPY $x0
     ; CHECK-NEXT: %y:_(s64) = COPY $x1
-    ; CHECK-NEXT: [[SADDO:%[0-9]+]]:_(s64), [[SADDO1:%[0-9]+]]:_(s1) = G_SADDO %x, %y
+    ; CHECK-NEXT: [[SADDO:%[0-9]+]]:_(s64), [[SADDO1:%[0-9]+]]:_(s32) = G_SADDO %x, %y
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SADDO]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: %saddsat:_(s64) = G_SELECT [[SADDO1]](s1), [[ADD]], [[SADDO]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SADDO1]], [[C2]]
+    ; CHECK-NEXT: %saddsat:_(s64) = G_SELECT [[AND]](s32), [[ADD]], [[SADDO]]
     ; CHECK-NEXT: $x0 = COPY %saddsat(s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s64) = COPY $x0
@@ -71,13 +75,14 @@ body:             |
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SEXT_INREG]], [[SEXT_INREG1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ADD]], 16
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[ADD]](s32), [[SEXT_INREG2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ADD]], 16
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT_INREG3]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -32768
     ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[ADD1]], [[ADD]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[ADD1]], [[ADD]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy_1:_(s32) = COPY $w0
@@ -107,13 +112,13 @@ body:             |
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SEXT_INREG]], [[SEXT_INREG1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ADD]], 1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[ADD]](s32), [[SEXT_INREG2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ADD]], 1
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT_INREG3]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[ADD1]], [[ADD]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C1]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[ADD1]], [[ADD]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy_1:_(s32) = COPY $w0
@@ -143,13 +148,14 @@ body:             |
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SEXT_INREG]], [[SEXT_INREG1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ADD]], 3
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[ADD]](s32), [[SEXT_INREG2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ADD]], 3
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT_INREG3]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[ADD1]], [[ADD]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[ADD1]], [[ADD]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy_1:_(s32) = COPY $w0
@@ -179,14 +185,15 @@ body:             |
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SEXT_INREG]], [[SEXT_INREG1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 36
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[ADD]](s64), [[SEXT_INREG2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 35
     ; CHECK-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 36
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[C]](s64)
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG3]], [[COPY]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 34359738368
     ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[ADD1]], [[ADD]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[ADD1]], [[ADD]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy_1:_(s64) = COPY $x0
@@ -215,15 +222,16 @@ body:             |
     ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES %copy_1(s128)
     ; CHECK-NEXT: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT %copy_2(s128), 0
     ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES %copy_2(s128)
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[EXTRACT]], [[EXTRACT1]]
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[EXTRACT]], [[EXTRACT1]]
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[UV1]](s64)
     ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC]], 24
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[UV3]](s64)
     ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC1]], 24
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[SEXT_INREG]], [[SEXT_INREG1]], [[UADDO1]]
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[SEXT_INREG]], [[SEXT_INREG1]], %85
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[UADDE]], 24
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UADDE]](s32), [[SEXT_INREG2]]
-    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[UADDO]](s64)
     ; CHECK-NEXT: [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[UADDE]](s32)
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
@@ -233,19 +241,20 @@ body:             |
     ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s32) = G_MERGE_VALUES [[UV8]](s8), [[UV9]](s8), [[UV10]](s8), [[UV11]](s8)
     ; CHECK-NEXT: [[MV2:%[0-9]+]]:_(s32) = G_MERGE_VALUES [[UV12]](s8), [[UV13]](s8), [[UV14]](s8), [[DEF]](s8)
     ; CHECK-NEXT: [[MV3:%[0-9]+]]:_(s32) = G_MERGE_VALUES [[UV16]](s8), [[UV17]](s8), [[UV18]](s8), [[UV16]](s8)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
     ; CHECK-NEXT: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MV]](s32), [[MV1]](s32)
     ; CHECK-NEXT: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MV2]](s32), [[MV3]](s32)
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV5]], [[C1]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV4]], [[C2]](s64)
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV5]], [[C2]](s64)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV4]], [[C3]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[LSHR]]
-    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[OR]], [[C1]](s64)
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 23
-    ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[ASHR]], [[C3]](s64)
-    ; CHECK-NEXT: [[UADDO2:%[0-9]+]]:_(s64), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[ASHR1]], [[C]]
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[OR]], [[C2]](s64)
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 23
+    ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[ASHR]], [[C4]](s64)
+    ; CHECK-NEXT: [[UADDO2:%[0-9]+]]:_(s64), [[UADDO3:%[0-9]+]]:_(s32) = G_UADDO [[ASHR1]], [[C]]
     ; CHECK-NEXT: [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MV]](s32), [[MV1]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UADDO2]], [[MV6]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C1]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND1]](s32), [[UADDO2]], [[MV6]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy_1:_(s128) = COPY $q0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
index d32965395050..52e17c4b9f53 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
@@ -134,17 +134,20 @@ body:             |
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY]](s32), [[C]]
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[ICMP]], 1
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[AND]], 1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SEXT_INREG]](s32)
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[SEXT_INREG]](s32), [[C2]](s64)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s32>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY2]](s32), [[C3]](s64)
     ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s32>) = G_SHUFFLE_VECTOR [[IVEC]](<4 x s32>), [[DEF]], shufflemask(0, 0, 0, 0)
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C3]](s32), [[C3]](s32), [[C3]](s32), [[C3]](s32)
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C4]](s32), [[C4]](s32), [[C4]](s32), [[C4]](s32)
     ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<4 x s32>) = G_XOR [[SHUF]], [[BUILD_VECTOR1]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[SHUF]]
-    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[BUILD_VECTOR]], [[XOR]]
-    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[AND]], [[AND1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[SHUF]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<4 x s32>) = G_AND [[BUILD_VECTOR]], [[XOR]]
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<4 x s32>) = G_OR [[AND1]], [[AND2]]
     ; CHECK-NEXT: $q0 = COPY [[OR]](<4 x s32>)
     ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:_(s32) = COPY $w0
@@ -171,9 +174,10 @@ body:             |
     ; CHECK-NEXT: %a:_(s32) = COPY $w0
     ; CHECK-NEXT: %b:_(s32) = COPY $w1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(sgt), %a(s32), %b
-    ; CHECK-NEXT: %cmp:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT %cmp(s1), [[DEF]], [[DEF]]
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[DEF]], [[DEF]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %a:_(s32) = COPY $w0
@@ -302,10 +306,11 @@ body:             |
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
     ; CHECK-NEXT: %cmp:_(s1) = G_ICMP intpred(eq), %w0(s32), [[C]]
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s1) = COPY %cmp(s1)
+    ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT %cmp(s1)
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ZEXT]](s32)
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<4 x s1>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[COPY]](s1), [[C2]](s64)
+    ; CHECK-NEXT: [[IVEC:%[0-9]+]]:_(<4 x s1>) = G_INSERT_VECTOR_ELT [[DEF]], [[TRUNC]](s1), [[C2]](s64)
     ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s1>) = G_SHUFFLE_VECTOR [[IVEC]](<4 x s1>), [[DEF]], shufflemask(0, 0, 0, 0)
     ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
     ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s1>) = G_BUILD_VECTOR [[C3]](s1), [[C3]](s1), [[C3]](s1), [[C3]](s1)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
index b0fdd9e632ed..e7073bdc3d68 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
@@ -103,17 +103,19 @@ body:             |
     ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C]], [[TRUNC]]
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[TRUNC]](s64), [[C]]
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[TRUNC]](s64), [[C1]]
-    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
     ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[TRUNC]](s64)
     ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s64)
     ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL1]]
     ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s64)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[SHL]], [[C1]]
-    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[OR]], [[SHL2]]
-    ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UV1]], [[SELECT1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[SHL]], [[C1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[AND1]](s32), [[OR]], [[SHL2]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ICMP1]], [[C2]]
+    ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[AND2]](s32), [[UV1]], [[SELECT1]]
     ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
     ; CHECK-NEXT: $q0 = COPY [[MV]](s128)
     %0:_(s128) = COPY $q0
@@ -137,17 +139,19 @@ body:             |
     ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C]], [[TRUNC]]
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[TRUNC]](s64), [[C]]
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[TRUNC]](s64), [[C1]]
-    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
     ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[TRUNC]](s64)
     ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s64)
     ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
     ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB]](s64)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[OR]], [[LSHR2]]
-    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UV]], [[SELECT]]
-    ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[LSHR]], [[C1]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[OR]], [[LSHR2]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ICMP1]], [[C2]]
+    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[AND1]](s32), [[UV]], [[SELECT]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[AND2]](s32), [[LSHR]], [[C1]]
     ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
     ; CHECK-NEXT: $q0 = COPY [[MV]](s128)
     %0:_(s128) = COPY $q0
@@ -171,9 +175,7 @@ body:             |
     ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C]], [[TRUNC]]
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[TRUNC]](s64), [[C]]
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[TRUNC]](s64), [[C1]]
-    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[TRUNC]](s64)
     ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s64)
     ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s64)
@@ -181,9 +183,13 @@ body:             |
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
     ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C2]](s64)
     ; CHECK-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[SUB]](s64)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[OR]], [[ASHR2]]
-    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UV]], [[SELECT]]
-    ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[ASHR]], [[ASHR1]]
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C3]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[OR]], [[ASHR2]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ICMP1]], [[C3]]
+    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[AND1]](s32), [[UV]], [[SELECT]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C3]]
+    ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[AND2]](s32), [[ASHR]], [[ASHR1]]
     ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
     ; CHECK-NEXT: $q0 = COPY [[MV]](s128)
     %0:_(s128) = COPY $q0
@@ -485,15 +491,16 @@ body:             |
     ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C1]], [[COPY]]
     ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s64), [[C1]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY]](s64), [[C2]]
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
     ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[COPY]](s64)
     ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[SUB1]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
     ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[MV1]], [[SUB]](s64)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[OR]], [[LSHR1]]
-    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[MV]], [[SELECT]]
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C3]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[OR]], [[LSHR1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ICMP1]], [[C3]]
+    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[AND1]](s32), [[MV]], [[SELECT]]
     ; CHECK-NEXT: %d1:_(s32), %d2:_(s32) = G_UNMERGE_VALUES [[SELECT1]](s64)
     ; CHECK-NEXT: $w0 = COPY %d2(s32)
     %0:_(s64) = COPY $x0
@@ -520,41 +527,42 @@ body:             |
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[C2]]
     ; CHECK-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C2]], [[COPY]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s64), [[C2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY]](s64), [[C1]]
-    ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
     ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK-NEXT: [[SUB2:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[C3]]
     ; CHECK-NEXT: [[SUB3:%[0-9]+]]:_(s64) = G_SUB [[C3]], [[COPY]]
     ; CHECK-NEXT: [[ICMP2:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[COPY]](s64), [[C3]]
-    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP2]](s32)
     ; CHECK-NEXT: [[ICMP3:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[COPY]](s64), [[C1]]
-    ; CHECK-NEXT: [[TRUNC3:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP3]](s32)
     ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV]], [[COPY]](s64)
     ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV1]], [[SUB3]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
     ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[MV1]], [[SUB2]](s64)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[OR]], [[LSHR1]]
-    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC3]](s1), [[MV]], [[SELECT]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP2]], [[C4]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[OR]], [[LSHR1]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ICMP3]], [[C4]]
+    ; CHECK-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[AND1]](s32), [[MV]], [[SELECT]]
     ; CHECK-NEXT: [[ICMP4:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[SUB1]](s64), [[C3]]
-    ; CHECK-NEXT: [[TRUNC4:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP4]](s32)
     ; CHECK-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[MV2]], [[SUB1]](s64)
-    ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC4]](s1), [[SHL1]], [[C1]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ICMP4]], [[C4]]
+    ; CHECK-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[AND2]](s32), [[SHL1]], [[C1]]
     ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SELECT1]], [[SELECT2]]
     ; CHECK-NEXT: [[SUB4:%[0-9]+]]:_(s64) = G_SUB [[SUB]], [[C3]]
     ; CHECK-NEXT: [[SUB5:%[0-9]+]]:_(s64) = G_SUB [[C3]], [[SUB]]
     ; CHECK-NEXT: [[ICMP5:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[SUB]](s64), [[C3]]
-    ; CHECK-NEXT: [[TRUNC5:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP5]](s32)
     ; CHECK-NEXT: [[ICMP6:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[SUB]](s64), [[C1]]
-    ; CHECK-NEXT: [[TRUNC6:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP6]](s32)
     ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[MV2]], [[SUB]](s64)
     ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[C1]], [[SUB5]](s64)
     ; CHECK-NEXT: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[SHL2]]
     ; CHECK-NEXT: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[C1]], [[SUB4]](s64)
-    ; CHECK-NEXT: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC5]](s1), [[OR2]], [[LSHR3]]
-    ; CHECK-NEXT: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC6]](s1), [[MV2]], [[SELECT3]]
-    ; CHECK-NEXT: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[OR1]], [[SELECT4]]
-    ; CHECK-NEXT: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[MV]], [[SELECT5]]
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ICMP5]], [[C4]]
+    ; CHECK-NEXT: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[AND3]](s32), [[OR2]], [[LSHR3]]
+    ; CHECK-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[ICMP6]], [[C4]]
+    ; CHECK-NEXT: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[AND4]](s32), [[MV2]], [[SELECT3]]
+    ; CHECK-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C4]]
+    ; CHECK-NEXT: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[AND5]](s32), [[OR1]], [[SELECT4]]
+    ; CHECK-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[ICMP1]], [[C4]]
+    ; CHECK-NEXT: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[AND6]](s32), [[MV]], [[SELECT5]]
     ; CHECK-NEXT: %d1:_(s32), %d2:_(s32) = G_UNMERGE_VALUES [[SELECT6]](s64)
     ; CHECK-NEXT: $w0 = COPY %d2(s32)
     %0:_(s64) = COPY $x0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
index 91ef658a653a..c1bf5cd33bea 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-simple.mir
@@ -8,38 +8,51 @@ body:             |
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $x0
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s64)
-  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK-NEXT:   [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[COPY]](s64)
   ; CHECK-NEXT:   [[PTRTOINT:%[0-9]+]]:_(s64) = G_PTRTOINT [[INTTOPTR]](p0)
   ; CHECK-NEXT:   $x0 = COPY [[PTRTOINT]](s64)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK-NEXT:   [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-  ; CHECK-NEXT:   [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC2]], [[TRUNC3]]
-  ; CHECK-NEXT:   $w0 = COPY [[SELECT]](s32)
+  ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; CHECK-NEXT:   [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC4]], [[C1]]
+  ; CHECK-NEXT:   [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND1]](s32), [[TRUNC2]], [[TRUNC3]]
+  ; CHECK-NEXT:   $w0 = COPY [[SELECT]](s32)
   ; CHECK-NEXT:   [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-  ; CHECK-NEXT:   [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC4]], [[TRUNC5]]
-  ; CHECK-NEXT:   $w0 = COPY [[SELECT1]](s32)
   ; CHECK-NEXT:   [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK-NEXT:   [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
-  ; CHECK-NEXT:   [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC6]], [[TRUNC7]]
+  ; CHECK-NEXT:   [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC7]], [[C1]]
+  ; CHECK-NEXT:   [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[AND2]](s32), [[TRUNC5]], [[TRUNC6]]
+  ; CHECK-NEXT:   $w0 = COPY [[SELECT1]](s32)
+  ; CHECK-NEXT:   [[TRUNC8:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; CHECK-NEXT:   [[TRUNC9:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; CHECK-NEXT:   [[TRUNC10:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; CHECK-NEXT:   [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC10]], [[C1]]
+  ; CHECK-NEXT:   [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[AND3]](s32), [[TRUNC8]], [[TRUNC9]]
   ; CHECK-NEXT:   $w0 = COPY [[SELECT2]](s32)
-  ; CHECK-NEXT:   [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[TRUNC1]], [[TRUNC1]]
-  ; CHECK-NEXT:   [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[COPY]], [[COPY]]
+  ; CHECK-NEXT:   [[TRUNC11:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; CHECK-NEXT:   [[AND4:%[0-9]+]]:_(s32) = G_AND [[TRUNC11]], [[C1]]
+  ; CHECK-NEXT:   [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[AND4]](s32), [[TRUNC]], [[TRUNC]]
+  ; CHECK-NEXT:   [[TRUNC12:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+  ; CHECK-NEXT:   [[AND5:%[0-9]+]]:_(s32) = G_AND [[TRUNC12]], [[C1]]
+  ; CHECK-NEXT:   [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[AND5]](s32), [[COPY]], [[COPY]]
   ; CHECK-NEXT:   $x0 = COPY [[SELECT4]](s64)
   ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(<2 x s32>) = G_BITCAST [[COPY]](s64)
   ; CHECK-NEXT:   [[BITCAST1:%[0-9]+]]:_(s64) = G_BITCAST [[BITCAST]](<2 x s32>)
   ; CHECK-NEXT:   $x0 = COPY [[BITCAST1]](s64)
   ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[SELECT3]](s32)
   ; CHECK-NEXT:   $w0 = COPY [[BITCAST2]](<2 x s16>)
-  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<4 x s8>) = G_BITCAST [[TRUNC1]](s32)
+  ; CHECK-NEXT:   [[BITCAST3:%[0-9]+]]:_(<4 x s8>) = G_BITCAST [[TRUNC]](s32)
   ; CHECK-NEXT:   [[BITCAST4:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST3]](<4 x s8>)
   ; CHECK-NEXT:   $w0 = COPY [[BITCAST4]](s32)
-  ; CHECK-NEXT:   [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[TRUNC1]](s32)
+  ; CHECK-NEXT:   [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[TRUNC]](s32)
   ; CHECK-NEXT:   [[BITCAST6:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST5]](<2 x s16>)
   ; CHECK-NEXT:   $w0 = COPY [[BITCAST6]](s32)
   bb.0.entry:

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssube.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssube.mir
index bf38a206f4b4..4412eed25f7a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssube.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssube.mir
@@ -11,12 +11,15 @@ body:             |
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY4]](s64)
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY]], [[COPY2]], %carry_in
-    ; CHECK-NEXT: %19:_(s64), %carry_out:_(s1) = G_SSUBE [[COPY1]], [[COPY3]], [[USUBE1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[COPY]], [[COPY2]], %24
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
+    ; CHECK-NEXT: [[SSUBE:%[0-9]+]]:_(s64), [[SSUBE1:%[0-9]+]]:_(s32) = G_SSUBE [[COPY1]], [[COPY3]], %22
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[USUBE1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[SSUBE1]](s32)
     ; CHECK-NEXT: $x0 = COPY [[USUBE]](s64)
-    ; CHECK-NEXT: $x1 = COPY %19(s64)
+    ; CHECK-NEXT: $x1 = COPY [[SSUBE]](s64)
     ; CHECK-NEXT: $x2 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -44,14 +47,18 @@ body:             |
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY4]](s64)
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY]], [[COPY1]], %carry_in
-    ; CHECK-NEXT: [[USUBE2:%[0-9]+]]:_(s64), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[COPY1]], [[COPY2]], [[USUBE1]]
-    ; CHECK-NEXT: %24:_(s64), %carry_out:_(s1) = G_SSUBE [[COPY2]], [[COPY3]], [[USUBE3]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[COPY]], [[COPY1]], %31
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
+    ; CHECK-NEXT: [[USUBE2:%[0-9]+]]:_(s64), [[USUBE3:%[0-9]+]]:_(s32) = G_USUBE [[COPY1]], [[COPY2]], %29
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[USUBE1]], [[C]]
+    ; CHECK-NEXT: [[SSUBE:%[0-9]+]]:_(s64), [[SSUBE1:%[0-9]+]]:_(s32) = G_SSUBE [[COPY2]], [[COPY3]], %27
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[USUBE3]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[SSUBE1]](s32)
     ; CHECK-NEXT: $x0 = COPY [[USUBE]](s64)
     ; CHECK-NEXT: $x1 = COPY [[USUBE2]](s64)
-    ; CHECK-NEXT: $x2 = COPY %24(s64)
+    ; CHECK-NEXT: $x2 = COPY [[SSUBE]](s64)
     ; CHECK-NEXT: $x3 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -78,12 +85,14 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY2]](s64)
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC]], 8
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC1]], 8
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[SEXT_INREG]], [[SEXT_INREG1]], %carry_in
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[SEXT_INREG]], [[SEXT_INREG1]], %18
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[USUBE]], 8
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[USUBE]](s32), [[SEXT_INREG2]]
     ; CHECK-NEXT: %sub_ext:_(s64) = G_ANYEXT [[USUBE]](s32)
@@ -111,11 +120,11 @@ body:             |
     ; CHECK: %lhs:_(s32) = COPY $w0
     ; CHECK-NEXT: %rhs:_(s32) = COPY $w1
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w2
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY]](s32)
-    ; CHECK-NEXT: %sub:_(s32), %carry_out:_(s1) = G_SSUBE %lhs, %rhs, %carry_in
-    ; CHECK-NEXT: %carry_out_ext:_(s32) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: %sub:_(s32), %7:_(s32) = G_SSUBE %lhs, %rhs, %8
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
     ; CHECK-NEXT: $w0 = COPY %sub(s32)
-    ; CHECK-NEXT: $w1 = COPY %carry_out_ext(s32)
+    ; CHECK-NEXT: $w1 = COPY %7(s32)
     %lhs:_(s32) = COPY $w0
     %rhs:_(s32) = COPY $w1
     %2:_(s32) = COPY $w2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssubo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssubo.mir
index 86aaa0e1413f..44e4fd9bfa79 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssubo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssubo.mir
@@ -10,11 +10,13 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY2]]
-    ; CHECK-NEXT: %17:_(s64), %carry_out:_(s1) = G_SSUBE [[COPY1]], [[COPY3]], [[USUBO1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s32) = G_USUBO [[COPY]], [[COPY2]]
+    ; CHECK-NEXT: [[SSUBE:%[0-9]+]]:_(s64), [[SSUBE1:%[0-9]+]]:_(s32) = G_SSUBE [[COPY1]], [[COPY3]], %20
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[USUBO1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[SSUBE1]](s32)
     ; CHECK-NEXT: $x0 = COPY [[USUBO]](s64)
-    ; CHECK-NEXT: $x1 = COPY %17(s64)
+    ; CHECK-NEXT: $x1 = COPY [[SSUBE]](s64)
     ; CHECK-NEXT: $x2 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -39,13 +41,16 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY1]], [[COPY2]], [[USUBO1]]
-    ; CHECK-NEXT: %22:_(s64), %carry_out:_(s1) = G_SSUBE [[COPY2]], [[COPY3]], [[USUBE1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s32) = G_USUBO [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[COPY1]], [[COPY2]], %27
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[USUBO1]], [[C]]
+    ; CHECK-NEXT: [[SSUBE:%[0-9]+]]:_(s64), [[SSUBE1:%[0-9]+]]:_(s32) = G_SSUBE [[COPY2]], [[COPY3]], %25
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[USUBE1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[SSUBE1]](s32)
     ; CHECK-NEXT: $x0 = COPY [[USUBO]](s64)
     ; CHECK-NEXT: $x1 = COPY [[USUBE]](s64)
-    ; CHECK-NEXT: $x2 = COPY %22(s64)
+    ; CHECK-NEXT: $x2 = COPY [[SSUBE]](s64)
     ; CHECK-NEXT: $x3 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -98,10 +103,9 @@ body:             |
     ; CHECK-LABEL: name: test_scalar_ssubo_32
     ; CHECK: %lhs:_(s32) = COPY $w0
     ; CHECK-NEXT: %rhs:_(s32) = COPY $w1
-    ; CHECK-NEXT: %sub:_(s32), %carry_out:_(s1) = G_SSUBO %lhs, %rhs
-    ; CHECK-NEXT: %carry_out_ext:_(s32) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: %sub:_(s32), %5:_(s32) = G_SSUBO %lhs, %rhs
     ; CHECK-NEXT: $w0 = COPY %sub(s32)
-    ; CHECK-NEXT: $w1 = COPY %carry_out_ext(s32)
+    ; CHECK-NEXT: $w1 = COPY %5(s32)
     %lhs:_(s32) = COPY $w0
     %rhs:_(s32) = COPY $w1
     %sub:_(s32), %carry_out:_(s1) = G_SSUBO %lhs, %rhs

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssubsat.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssubsat.mir
index 10d365f993b2..91aa787c4b3f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssubsat.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-ssubsat.mir
@@ -13,12 +13,14 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %x:_(s32) = COPY $w0
     ; CHECK-NEXT: %y:_(s32) = COPY $w1
-    ; CHECK-NEXT: [[SSUBO:%[0-9]+]]:_(s32), [[SSUBO1:%[0-9]+]]:_(s1) = G_SSUBO %x, %y
+    ; CHECK-NEXT: [[SSUBO:%[0-9]+]]:_(s32), [[SSUBO1:%[0-9]+]]:_(s32) = G_SSUBO %x, %y
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SSUBO]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: %ssubsat:_(s32) = G_SELECT [[SSUBO1]](s1), [[ADD]], [[SSUBO]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SSUBO1]], [[C2]]
+    ; CHECK-NEXT: %ssubsat:_(s32) = G_SELECT [[AND]](s32), [[ADD]], [[SSUBO]]
     ; CHECK-NEXT: $w0 = COPY %ssubsat(s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %x:_(s32) = COPY $w0
@@ -39,12 +41,14 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %x:_(s64) = COPY $x0
     ; CHECK-NEXT: %y:_(s64) = COPY $x1
-    ; CHECK-NEXT: [[SSUBO:%[0-9]+]]:_(s64), [[SSUBO1:%[0-9]+]]:_(s1) = G_SSUBO %x, %y
+    ; CHECK-NEXT: [[SSUBO:%[0-9]+]]:_(s64), [[SSUBO1:%[0-9]+]]:_(s32) = G_SSUBO %x, %y
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SSUBO]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: %ssubsat:_(s64) = G_SELECT [[SSUBO1]](s1), [[ADD]], [[SSUBO]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SSUBO1]], [[C2]]
+    ; CHECK-NEXT: %ssubsat:_(s64) = G_SELECT [[AND]](s32), [[ADD]], [[SSUBO]]
     ; CHECK-NEXT: $x0 = COPY %ssubsat(s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %x:_(s64) = COPY $x0
@@ -71,13 +75,14 @@ body:             |
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SEXT_INREG]], [[SEXT_INREG1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[SUB]], 16
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SUB]](s32), [[SEXT_INREG2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[SUB]], 16
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT_INREG3]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -32768
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[ADD]], [[SUB]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[ADD]], [[SUB]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy_1:_(s32) = COPY $w0
@@ -107,13 +112,13 @@ body:             |
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SEXT_INREG]], [[SEXT_INREG1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[SUB]], 1
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SUB]](s32), [[SEXT_INREG2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[SUB]], 1
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT_INREG3]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[ADD]], [[SUB]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C1]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[ADD]], [[SUB]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy_1:_(s32) = COPY $w0
@@ -143,13 +148,14 @@ body:             |
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SEXT_INREG]], [[SEXT_INREG1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[SUB]], 3
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SUB]](s32), [[SEXT_INREG2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s32) = G_SEXT_INREG [[SUB]], 3
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT_INREG3]], [[C]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[ADD]], [[SUB]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[ADD]], [[SUB]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy_1:_(s32) = COPY $w0
@@ -179,14 +185,15 @@ body:             |
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[SEXT_INREG]], [[SEXT_INREG1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 36
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SUB]](s64), [[SEXT_INREG2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 35
     ; CHECK-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 36
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[C]](s64)
     ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SEXT_INREG3]], [[COPY]](s64)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 34359738368
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC]](s1), [[ADD]], [[SUB]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[ADD]], [[SUB]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy_1:_(s64) = COPY $x0
@@ -215,15 +222,16 @@ body:             |
     ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES %copy_1(s128)
     ; CHECK-NEXT: [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT %copy_2(s128), 0
     ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES %copy_2(s128)
-    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[EXTRACT]], [[EXTRACT1]]
+    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s32) = G_USUBO [[EXTRACT]], [[EXTRACT1]]
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[UV1]](s64)
     ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC]], 24
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[UV3]](s64)
     ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[TRUNC1]], 24
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[SEXT_INREG]], [[SEXT_INREG1]], [[USUBO1]]
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[SEXT_INREG]], [[SEXT_INREG1]], %85
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[USUBO1]], [[C1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[USUBE]], 24
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[USUBE]](s32), [[SEXT_INREG2]]
-    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8), [[UV8:%[0-9]+]]:_(s8), [[UV9:%[0-9]+]]:_(s8), [[UV10:%[0-9]+]]:_(s8), [[UV11:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[USUBO]](s64)
     ; CHECK-NEXT: [[UV12:%[0-9]+]]:_(s8), [[UV13:%[0-9]+]]:_(s8), [[UV14:%[0-9]+]]:_(s8), [[UV15:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[USUBE]](s32)
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
@@ -233,19 +241,20 @@ body:             |
     ; CHECK-NEXT: [[MV1:%[0-9]+]]:_(s32) = G_MERGE_VALUES [[UV8]](s8), [[UV9]](s8), [[UV10]](s8), [[UV11]](s8)
     ; CHECK-NEXT: [[MV2:%[0-9]+]]:_(s32) = G_MERGE_VALUES [[UV12]](s8), [[UV13]](s8), [[UV14]](s8), [[DEF]](s8)
     ; CHECK-NEXT: [[MV3:%[0-9]+]]:_(s32) = G_MERGE_VALUES [[UV16]](s8), [[UV17]](s8), [[UV18]](s8), [[UV16]](s8)
-    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
     ; CHECK-NEXT: [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MV]](s32), [[MV1]](s32)
     ; CHECK-NEXT: [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MV2]](s32), [[MV3]](s32)
-    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV5]], [[C1]](s64)
-    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV4]], [[C2]](s64)
+    ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[MV5]], [[C2]](s64)
+    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[MV4]], [[C3]](s64)
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL]], [[LSHR]]
-    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[OR]], [[C1]](s64)
-    ; CHECK-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 23
-    ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[ASHR]], [[C3]](s64)
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[ASHR1]], [[C]]
+    ; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[OR]], [[C2]](s64)
+    ; CHECK-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 23
+    ; CHECK-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[ASHR]], [[C4]](s64)
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[ASHR1]], [[C]]
     ; CHECK-NEXT: [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[MV]](s32), [[MV1]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UADDO]], [[MV6]]
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C1]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND1]](s32), [[UADDO]], [[MV6]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %copy_1:_(s128) = COPY $q0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir
index 0d4d3bffff6d..4ffac7f157fb 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-sub.mir
@@ -10,8 +10,10 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY2]]
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY1]], [[COPY3]], [[USUBO1]]
+    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s32) = G_USUBO [[COPY]], [[COPY2]]
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[COPY1]], [[COPY3]], %18
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[USUBO1]], [[C]]
     ; CHECK-NEXT: $x0 = COPY [[USUBO]](s64)
     ; CHECK-NEXT: $x1 = COPY [[USUBE]](s64)
     %0:_(s64) = COPY $x0
@@ -35,9 +37,12 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY1]], [[COPY2]], [[USUBO1]]
-    ; CHECK-NEXT: [[USUBE2:%[0-9]+]]:_(s64), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[COPY2]], [[COPY3]], [[USUBE1]]
+    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s32) = G_USUBO [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[COPY1]], [[COPY2]], %34
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[USUBO1]], [[C]]
+    ; CHECK-NEXT: [[USUBE2:%[0-9]+]]:_(s64), [[USUBE3:%[0-9]+]]:_(s32) = G_USUBE [[COPY2]], [[COPY3]], %32
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[USUBE1]], [[C]]
     ; CHECK-NEXT: $x0 = COPY [[USUBO]](s64)
     ; CHECK-NEXT: $x1 = COPY [[USUBE]](s64)
     ; CHECK-NEXT: $x2 = COPY [[USUBE2]](s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uadd-sat.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uadd-sat.mir
index dc3c003f7d00..9dbbd1454154 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uadd-sat.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uadd-sat.mir
@@ -16,9 +16,11 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[COPY]], [[COPY1]]
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[UADDO1]](s1), [[C]], [[UADDO]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C1]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[C]], [[UADDO]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
@@ -44,9 +46,11 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[COPY]], [[COPY1]]
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[UADDO1]](s1), [[C]], [[UADDO]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C1]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[C]], [[UADDO]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
@@ -78,9 +82,10 @@ body:             |
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND]], [[AND1]]
     ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[ADD]](s32), [[AND2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[C1]], [[ADD]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND3]](s32), [[C1]], [[ADD]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %2:_(s32) = COPY $w0
@@ -115,9 +120,10 @@ body:             |
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND]], [[AND1]]
     ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[ADD]](s32), [[AND2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[C1]], [[ADD]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND3]](s32), [[C1]], [[ADD]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %2:_(s32) = COPY $w0
@@ -155,9 +161,10 @@ body:             |
     ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AND]], [[AND1]]
     ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[ADD]](s32), [[AND2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[COPY2]], [[ADD]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C1]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND3]](s32), [[COPY2]], [[ADD]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %2:_(s32) = COPY $w0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uadde.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uadde.mir
index 74fa7256f133..48bf05010561 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uadde.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uadde.mir
@@ -11,12 +11,15 @@ body:             |
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY4]](s64)
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY]], [[COPY2]], %carry_in
-    ; CHECK-NEXT: %19:_(s64), %carry_out:_(s1) = G_UADDE [[COPY1]], [[COPY3]], [[UADDE1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY]], [[COPY2]], %24
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
+    ; CHECK-NEXT: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s32) = G_UADDE [[COPY1]], [[COPY3]], %22
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UADDE1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[UADDE3]](s32)
     ; CHECK-NEXT: $x0 = COPY [[UADDE]](s64)
-    ; CHECK-NEXT: $x1 = COPY %19(s64)
+    ; CHECK-NEXT: $x1 = COPY [[UADDE2]](s64)
     ; CHECK-NEXT: $x2 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -44,14 +47,18 @@ body:             |
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY4]](s64)
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY]], [[COPY1]], %carry_in
-    ; CHECK-NEXT: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY2]], [[UADDE1]]
-    ; CHECK-NEXT: %24:_(s64), %carry_out:_(s1) = G_UADDE [[COPY2]], [[COPY3]], [[UADDE3]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY]], [[COPY1]], %31
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
+    ; CHECK-NEXT: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s32) = G_UADDE [[COPY1]], [[COPY2]], %29
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UADDE1]], [[C]]
+    ; CHECK-NEXT: [[UADDE4:%[0-9]+]]:_(s64), [[UADDE5:%[0-9]+]]:_(s32) = G_UADDE [[COPY2]], [[COPY3]], %27
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UADDE3]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[UADDE5]](s32)
     ; CHECK-NEXT: $x0 = COPY [[UADDE]](s64)
     ; CHECK-NEXT: $x1 = COPY [[UADDE2]](s64)
-    ; CHECK-NEXT: $x2 = COPY %24(s64)
+    ; CHECK-NEXT: $x2 = COPY [[UADDE4]](s64)
     ; CHECK-NEXT: $x3 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -78,15 +85,17 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY2]](s64)
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C]]
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[AND]], [[AND1]], %carry_in
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UADDE]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UADDE]](s32), [[AND2]]
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[AND]], [[AND1]], %18
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C1]]
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UADDE]], [[C]]
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[UADDE]](s32), [[AND3]]
     ; CHECK-NEXT: %add_ext:_(s64) = G_ANYEXT [[UADDE]](s32)
     ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[ICMP]](s32)
     ; CHECK-NEXT: $x0 = COPY %add_ext(s64)
@@ -112,11 +121,11 @@ body:             |
     ; CHECK: %lhs:_(s32) = COPY $w0
     ; CHECK-NEXT: %rhs:_(s32) = COPY $w1
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w2
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY]](s32)
-    ; CHECK-NEXT: %add:_(s32), %carry_out:_(s1) = G_UADDE %lhs, %rhs, %carry_in
-    ; CHECK-NEXT: %carry_out_ext:_(s32) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: %add:_(s32), %7:_(s32) = G_UADDE %lhs, %rhs, %8
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
     ; CHECK-NEXT: $w0 = COPY %add(s32)
-    ; CHECK-NEXT: $w1 = COPY %carry_out_ext(s32)
+    ; CHECK-NEXT: $w1 = COPY %7(s32)
     %lhs:_(s32) = COPY $w0
     %rhs:_(s32) = COPY $w1
     %2:_(s32) = COPY $w2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uaddo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uaddo.mir
index c032e16c5396..75339098ce64 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uaddo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-uaddo.mir
@@ -10,11 +10,13 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY2]]
-    ; CHECK-NEXT: %17:_(s64), %carry_out:_(s1) = G_UADDE [[COPY1]], [[COPY3]], [[UADDO1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[COPY]], [[COPY2]]
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY1]], [[COPY3]], %20
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[UADDE1]](s32)
     ; CHECK-NEXT: $x0 = COPY [[UADDO]](s64)
-    ; CHECK-NEXT: $x1 = COPY %17(s64)
+    ; CHECK-NEXT: $x1 = COPY [[UADDE]](s64)
     ; CHECK-NEXT: $x2 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -39,13 +41,16 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[COPY1]], [[COPY2]], [[UADDO1]]
-    ; CHECK-NEXT: %22:_(s64), %carry_out:_(s1) = G_UADDE [[COPY2]], [[COPY3]], [[UADDE1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s32) = G_UADDO [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s32) = G_UADDE [[COPY1]], [[COPY2]], %27
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UADDO1]], [[C]]
+    ; CHECK-NEXT: [[UADDE2:%[0-9]+]]:_(s64), [[UADDE3:%[0-9]+]]:_(s32) = G_UADDE [[COPY2]], [[COPY3]], %25
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UADDE1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[UADDE3]](s32)
     ; CHECK-NEXT: $x0 = COPY [[UADDO]](s64)
     ; CHECK-NEXT: $x1 = COPY [[UADDE]](s64)
-    ; CHECK-NEXT: $x2 = COPY %22(s64)
+    ; CHECK-NEXT: $x2 = COPY [[UADDE2]](s64)
     ; CHECK-NEXT: $x3 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -99,10 +104,9 @@ body:             |
     ; CHECK-LABEL: name: test_scalar_uaddo_32
     ; CHECK: %lhs:_(s32) = COPY $w0
     ; CHECK-NEXT: %rhs:_(s32) = COPY $w1
-    ; CHECK-NEXT: %add:_(s32), %carry_out:_(s1) = G_UADDO %lhs, %rhs
-    ; CHECK-NEXT: %carry_out_ext:_(s32) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: %add:_(s32), %5:_(s32) = G_UADDO %lhs, %rhs
     ; CHECK-NEXT: $w0 = COPY %add(s32)
-    ; CHECK-NEXT: $w1 = COPY %carry_out_ext(s32)
+    ; CHECK-NEXT: $w1 = COPY %5(s32)
     %lhs:_(s32) = COPY $w0
     %rhs:_(s32) = COPY $w1
     %add:_(s32), %carry_out:_(s1) = G_UADDO %lhs, %rhs

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usub-sat.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usub-sat.mir
index f2e8692be339..5fc30343b42f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usub-sat.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usub-sat.mir
@@ -16,9 +16,11 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
-    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s32) = G_USUBO [[COPY]], [[COPY1]]
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[USUBO1]](s1), [[C]], [[USUBO]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[USUBO1]], [[C1]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[C]], [[USUBO]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:_(s32) = COPY $w0
@@ -44,9 +46,11 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
-    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s32) = G_USUBO [[COPY]], [[COPY1]]
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[USUBO1]](s1), [[C]], [[USUBO]]
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[USUBO1]], [[C1]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[AND]](s32), [[C]], [[USUBO]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:_(s64) = COPY $x0
@@ -78,9 +82,10 @@ body:             |
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[AND]], [[AND1]]
     ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SUB]](s32), [[AND2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[C1]], [[SUB]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND3]](s32), [[C1]], [[SUB]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %2:_(s32) = COPY $w0
@@ -115,9 +120,10 @@ body:             |
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[AND]], [[AND1]]
     ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SUB]](s32), [[AND2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[C1]], [[SUB]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND3]](s32), [[C1]], [[SUB]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %2:_(s32) = COPY $w0
@@ -152,9 +158,10 @@ body:             |
     ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[AND]], [[AND1]]
     ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[SUB]](s32), [[AND2]]
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[TRUNC]](s1), [[C1]], [[SUB]]
+    ; CHECK-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[ICMP]], [[C2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND3]](s32), [[C1]], [[SUB]]
     ; CHECK-NEXT: $w0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %2:_(s32) = COPY $w0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usube.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usube.mir
index 3e03a3ac3d4a..55fb822e0126 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usube.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usube.mir
@@ -11,12 +11,15 @@ body:             |
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY4]](s64)
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY]], [[COPY2]], %carry_in
-    ; CHECK-NEXT: %19:_(s64), %carry_out:_(s1) = G_USUBE [[COPY1]], [[COPY3]], [[USUBE1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[COPY]], [[COPY2]], %24
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
+    ; CHECK-NEXT: [[USUBE2:%[0-9]+]]:_(s64), [[USUBE3:%[0-9]+]]:_(s32) = G_USUBE [[COPY1]], [[COPY3]], %22
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[USUBE1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[USUBE3]](s32)
     ; CHECK-NEXT: $x0 = COPY [[USUBE]](s64)
-    ; CHECK-NEXT: $x1 = COPY %19(s64)
+    ; CHECK-NEXT: $x1 = COPY [[USUBE2]](s64)
     ; CHECK-NEXT: $x2 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -44,14 +47,18 @@ body:             |
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
     ; CHECK-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x4
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY4]](s64)
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY]], [[COPY1]], %carry_in
-    ; CHECK-NEXT: [[USUBE2:%[0-9]+]]:_(s64), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[COPY1]], [[COPY2]], [[USUBE1]]
-    ; CHECK-NEXT: %24:_(s64), %carry_out:_(s1) = G_USUBE [[COPY2]], [[COPY3]], [[USUBE3]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[COPY]], [[COPY1]], %31
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY4]](s64)
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
+    ; CHECK-NEXT: [[USUBE2:%[0-9]+]]:_(s64), [[USUBE3:%[0-9]+]]:_(s32) = G_USUBE [[COPY1]], [[COPY2]], %29
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[USUBE1]], [[C]]
+    ; CHECK-NEXT: [[USUBE4:%[0-9]+]]:_(s64), [[USUBE5:%[0-9]+]]:_(s32) = G_USUBE [[COPY2]], [[COPY3]], %27
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[USUBE3]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[USUBE5]](s32)
     ; CHECK-NEXT: $x0 = COPY [[USUBE]](s64)
     ; CHECK-NEXT: $x1 = COPY [[USUBE2]](s64)
-    ; CHECK-NEXT: $x2 = COPY %24(s64)
+    ; CHECK-NEXT: $x2 = COPY [[USUBE4]](s64)
     ; CHECK-NEXT: $x3 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -78,15 +85,17 @@ body:             |
     ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY2]](s64)
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
     ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
     ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C]]
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[AND]], [[AND1]], %carry_in
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[USUBE]], [[C]]
-    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[USUBE]](s32), [[AND2]]
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[AND]], [[AND1]], %18
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s64)
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C1]]
+    ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[USUBE]], [[C]]
+    ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ne), [[USUBE]](s32), [[AND3]]
     ; CHECK-NEXT: %sub_ext:_(s64) = G_ANYEXT [[USUBE]](s32)
     ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[ICMP]](s32)
     ; CHECK-NEXT: $x0 = COPY %sub_ext(s64)
@@ -112,11 +121,11 @@ body:             |
     ; CHECK: %lhs:_(s32) = COPY $w0
     ; CHECK-NEXT: %rhs:_(s32) = COPY $w1
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $w2
-    ; CHECK-NEXT: %carry_in:_(s1) = G_TRUNC [[COPY]](s32)
-    ; CHECK-NEXT: %sub:_(s32), %carry_out:_(s1) = G_USUBE %lhs, %rhs, %carry_in
-    ; CHECK-NEXT: %carry_out_ext:_(s32) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: %sub:_(s32), %7:_(s32) = G_USUBE %lhs, %rhs, %8
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
     ; CHECK-NEXT: $w0 = COPY %sub(s32)
-    ; CHECK-NEXT: $w1 = COPY %carry_out_ext(s32)
+    ; CHECK-NEXT: $w1 = COPY %7(s32)
     %lhs:_(s32) = COPY $w0
     %rhs:_(s32) = COPY $w1
     %2:_(s32) = COPY $w2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usubo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usubo.mir
index 7037fc2f5c27..b44dff1021d5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usubo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-usubo.mir
@@ -10,11 +10,13 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY2]]
-    ; CHECK-NEXT: %17:_(s64), %carry_out:_(s1) = G_USUBE [[COPY1]], [[COPY3]], [[USUBO1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s32) = G_USUBO [[COPY]], [[COPY2]]
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[COPY1]], [[COPY3]], %20
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[USUBO1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[USUBE1]](s32)
     ; CHECK-NEXT: $x0 = COPY [[USUBO]](s64)
-    ; CHECK-NEXT: $x1 = COPY %17(s64)
+    ; CHECK-NEXT: $x1 = COPY [[USUBE]](s64)
     ; CHECK-NEXT: $x2 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -39,13 +41,16 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x3
-    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[COPY]], [[COPY1]]
-    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[COPY1]], [[COPY2]], [[USUBO1]]
-    ; CHECK-NEXT: %22:_(s64), %carry_out:_(s1) = G_USUBE [[COPY2]], [[COPY3]], [[USUBE1]]
-    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s32) = G_USUBO [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s32) = G_USUBE [[COPY1]], [[COPY2]], %27
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[USUBO1]], [[C]]
+    ; CHECK-NEXT: [[USUBE2:%[0-9]+]]:_(s64), [[USUBE3:%[0-9]+]]:_(s32) = G_USUBE [[COPY2]], [[COPY3]], %25
+    ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[USUBE1]], [[C]]
+    ; CHECK-NEXT: %carry_out_ext:_(s64) = G_ANYEXT [[USUBE3]](s32)
     ; CHECK-NEXT: $x0 = COPY [[USUBO]](s64)
     ; CHECK-NEXT: $x1 = COPY [[USUBE]](s64)
-    ; CHECK-NEXT: $x2 = COPY %22(s64)
+    ; CHECK-NEXT: $x2 = COPY [[USUBE2]](s64)
     ; CHECK-NEXT: $x3 = COPY %carry_out_ext(s64)
     %0:_(s64) = COPY $x0
     %1:_(s64) = COPY $x1
@@ -99,10 +104,9 @@ body:             |
     ; CHECK-LABEL: name: test_scalar_usubo_32
     ; CHECK: %lhs:_(s32) = COPY $w0
     ; CHECK-NEXT: %rhs:_(s32) = COPY $w1
-    ; CHECK-NEXT: %sub:_(s32), %carry_out:_(s1) = G_USUBO %lhs, %rhs
-    ; CHECK-NEXT: %carry_out_ext:_(s32) = G_ANYEXT %carry_out(s1)
+    ; CHECK-NEXT: %sub:_(s32), %5:_(s32) = G_USUBO %lhs, %rhs
     ; CHECK-NEXT: $w0 = COPY %sub(s32)
-    ; CHECK-NEXT: $w1 = COPY %carry_out_ext(s32)
+    ; CHECK-NEXT: $w1 = COPY %5(s32)
     %lhs:_(s32) = COPY $w0
     %rhs:_(s32) = COPY $w1
     %sub:_(s32), %carry_out:_(s1) = G_USUBO %lhs, %rhs

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-combiner-zext-trunc-crash.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-combiner-zext-trunc-crash.mir
index 37832293b81b..3fc7505e44e0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-combiner-zext-trunc-crash.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-combiner-zext-trunc-crash.mir
@@ -16,7 +16,7 @@ body:             |
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI %32(s16), %bb.2, [[DEF]](s16), %bb.0
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s16) = G_PHI %33(s16), %bb.2, [[DEF]](s16), %bb.0
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
   ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
@@ -33,13 +33,14 @@ body:             |
   ; CHECK-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 26
   ; CHECK-NEXT:   [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[AND2]](s32), [[C4]]
   ; CHECK-NEXT:   [[OR1:%[0-9]+]]:_(s32) = G_OR [[ICMP1]], [[OR]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[OR1]](s32)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.2
+  ; CHECK-NEXT:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+  ; CHECK-NEXT:   [[AND3:%[0-9]+]]:_(s32) = G_AND [[OR1]], [[C5]]
+  ; CHECK-NEXT:   G_BRCOND [[AND3]](s32), %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 64
+  ; CHECK-NEXT:   [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 64
   ; CHECK-NEXT:   G_BR %bb.1
   bb.1:
     %1:_(s8) = G_CONSTANT i8 46

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll b/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
index 298c8b749a89..1ecc10626595 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/localizer-arm64-tti.ll
@@ -28,8 +28,8 @@ define i32 @foo() {
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[GV]](p0) :: (dereferenceable load (s32) from @var1)
   ; CHECK-NEXT:   [[C3:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[LOAD]](s32), [[C3]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.3
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:gpr(s32) = G_AND [[ICMP]], [[C3]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.3
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2.if.then:
@@ -84,8 +84,8 @@ define i32 @darwin_tls() {
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[GV2]](p0) :: (dereferenceable load (s32) from @var1)
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:gpr(s32) = G_ICMP intpred(ne), [[LOAD]](s32), [[C1]]
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s32)
-  ; CHECK-NEXT:   G_BRCOND [[TRUNC]](s1), %bb.3
+  ; CHECK-NEXT:   [[AND:%[0-9]+]]:gpr(s32) = G_AND [[ICMP]], [[C1]]
+  ; CHECK-NEXT:   G_BRCOND [[AND]](s32), %bb.3
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2.if.then:

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-and-tbnz-tbz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-and-tbnz-tbz.mir
index 67dff124bcf6..95ae12f6d59d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-and-tbnz-tbz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-and-tbnz-tbz.mir
@@ -33,8 +33,7 @@ body:             |
     %3:gpr(s64) = G_CONSTANT i64 0
     %2:gpr(s64) = G_AND %0, %1
     %5:gpr(s32) = G_ICMP intpred(ne), %2(s64), %3
-    %4:gpr(s1) = G_TRUNC %5(s32)
-    G_BRCOND %4(s1), %bb.1
+    G_BRCOND %5, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -66,8 +65,7 @@ body:             |
     %3:gpr(s64) = G_CONSTANT i64 0
     %2:gpr(s64) = G_AND %0, %1
     %5:gpr(s32) = G_ICMP intpred(eq), %2(s64), %3
-    %4:gpr(s1) = G_TRUNC %5(s32)
-    G_BRCOND %4(s1), %bb.1
+    G_BRCOND %5, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -99,8 +97,7 @@ body:             |
     %3:gpr(s32) = G_CONSTANT i32 0
     %2:gpr(s32) = G_AND %0, %1
     %5:gpr(s32) = G_ICMP intpred(ne), %2(s32), %3
-    %4:gpr(s1) = G_TRUNC %5(s32)
-    G_BRCOND %4(s1), %bb.1
+    G_BRCOND %5, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -132,8 +129,7 @@ body:             |
     %3:gpr(s32) = G_CONSTANT i32 0
     %2:gpr(s32) = G_AND %0, %1
     %5:gpr(s32) = G_ICMP intpred(eq), %2(s32), %3
-    %4:gpr(s1) = G_TRUNC %5(s32)
-    G_BRCOND %4(s1), %bb.1
+    G_BRCOND %5, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -166,8 +162,7 @@ body:             |
     %3:gpr(s32) = G_CONSTANT i32 0
     %2:gpr(s32) = G_AND %0, %1
     %5:gpr(s32) = G_ICMP intpred(slt), %2(s32), %3
-    %4:gpr(s1) = G_TRUNC %5(s32)
-    G_BRCOND %4(s1), %bb.1
+    G_BRCOND %5, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -200,8 +195,7 @@ body:             |
     %3:gpr(s32) = G_CONSTANT i32 0
     %2:gpr(s32) = G_AND %0, %1
     %5:gpr(s32) = G_ICMP intpred(sgt), %2(s32), %3
-    %4:gpr(s1) = G_TRUNC %5(s32)
-    G_BRCOND %4(s1), %bb.1
+    G_BRCOND %5, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -233,8 +227,7 @@ body:             |
     %3:gpr(s64) = G_CONSTANT i64 0
     %2:gpr(s64) = G_AND %0, %1
     %5:gpr(s32) = G_ICMP intpred(ne), %2(s64), %3
-    %4:gpr(s1) = G_TRUNC %5(s32)
-    G_BRCOND %4(s1), %bb.1
+    G_BRCOND %5, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -267,8 +260,7 @@ body:             |
     %3:gpr(s64) = G_CONSTANT i64 4
     %2:gpr(s64) = G_AND %0, %1
     %5:gpr(s32) = G_ICMP intpred(ne), %2(s64), %3
-    %4:gpr(s1) = G_TRUNC %5(s32)
-    G_BRCOND %4(s1), %bb.1
+    G_BRCOND %5, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
index 2562ba299ba2..c9749b84d8a5 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-and-tbz-tbnz.mir
@@ -33,8 +33,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -68,8 +67,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -105,8 +103,7 @@ body:             |
     %fold_me:gpr(s64) = G_AND %copy, %fold_cst
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
index a2a4ace5aadc..5b8a0a05e1b3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir
@@ -55,8 +55,7 @@ body:             |
     %6:gpr(s32) = G_CONSTANT i32 1
     %3:gpr(s32) = G_SUB %2, %1
     %7:gpr(s32) = G_ICMP intpred(ne), %0(s32), %3
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s32) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s32) = G_SELECT %7, %6, %2
     $w0 = COPY %5(s32)
     RET_ReallyLR implicit $w0
 
@@ -86,8 +85,7 @@ body:             |
     %6:gpr(s32) = G_CONSTANT i32 1
     %3:gpr(s32) = G_SUB %2, %0
     %7:gpr(s32) = G_ICMP intpred(ne), %3(s32), %1
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s32) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s32) = G_SELECT %7, %6, %2
     $w0 = COPY %5(s32)
     RET_ReallyLR implicit $w0
 
@@ -118,8 +116,7 @@ body:             |
     %6:gpr(s32) = G_CONSTANT i32 1
     %3:gpr(s32) = G_SUB %2, %1
     %7:gpr(s32) = G_ICMP intpred(slt), %0(s32), %3
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s32) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s32) = G_SELECT %7, %6, %2
     $w0 = COPY %5(s32)
     RET_ReallyLR implicit $w0
 
@@ -150,8 +147,7 @@ body:             |
     %6:gpr(s32) = G_CONSTANT i32 1
     %3:gpr(s32) = G_SUB %2, %0
     %7:gpr(s32) = G_ICMP intpred(slt), %3(s32), %1
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s32) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s32) = G_SELECT %7, %6, %2
     $w0 = COPY %5(s32)
     RET_ReallyLR implicit $w0
 
@@ -181,8 +177,7 @@ body:             |
     %6:gpr(s64) = G_CONSTANT i64 1
     %3:gpr(s64) = G_SUB %2, %1
     %7:gpr(s32) = G_ICMP intpred(ne), %0(s64), %3
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s64) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s64) = G_SELECT %7, %6, %2
     $x0 = COPY %5(s64)
     RET_ReallyLR implicit $x0
 
@@ -212,8 +207,7 @@ body:             |
     %6:gpr(s64) = G_CONSTANT i64 1
     %3:gpr(s64) = G_SUB %2, %0
     %7:gpr(s32) = G_ICMP intpred(ne), %3(s64), %1
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s64) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s64) = G_SELECT %7, %6, %2
     $x0 = COPY %5(s64)
     RET_ReallyLR implicit $x0
 
@@ -244,8 +238,7 @@ body:             |
     %6:gpr(s64) = G_CONSTANT i64 1
     %3:gpr(s64) = G_SUB %2, %1
     %7:gpr(s32) = G_ICMP intpred(slt), %0(s64), %3
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s64) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s64) = G_SELECT %7, %6, %2
     $x0 = COPY %5(s64)
     RET_ReallyLR implicit $x0
 
@@ -276,8 +269,7 @@ body:             |
     %6:gpr(s64) = G_CONSTANT i64 1
     %3:gpr(s64) = G_SUB %2, %0
     %7:gpr(s32) = G_ICMP intpred(slt), %3(s64), %1
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s64) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s64) = G_SELECT %7, %6, %2
     $x0 = COPY %5(s64)
     RET_ReallyLR implicit $x0
 
@@ -306,8 +298,7 @@ body:             |
     %3:gpr(s32) = G_AND %2, %1
     %8:gpr(s32) = G_CONSTANT i32 0
     %7:gpr(s32) = G_ICMP intpred(eq), %3(s32), %8
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s32) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s32) = G_SELECT %7, %6, %2
     $w0 = COPY %5(s32)
     RET_ReallyLR implicit $w0
 
@@ -336,8 +327,7 @@ body:             |
     %3:gpr(s64) = G_AND %2, %1
     %8:gpr(s64) = G_CONSTANT i64 0
     %7:gpr(s32) = G_ICMP intpred(eq), %3(s64), %8
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s64) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s64) = G_SELECT %7, %6, %2
     $x0 = COPY %5(s64)
     RET_ReallyLR implicit $x0
 
@@ -367,8 +357,7 @@ body:             |
     %3:gpr(s32) = G_AND %2, %1
     %8:gpr(s32) = G_CONSTANT i32 0
     %7:gpr(s32) = G_ICMP intpred(ugt), %3(s32), %8
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s32) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s32) = G_SELECT %7, %6, %2
     $w0 = COPY %5(s32)
     RET_ReallyLR implicit $w0
 
@@ -398,8 +387,7 @@ body:             |
     %3:gpr(s32) = G_AND %2, %1
     %8:gpr(s32) = G_CONSTANT i32 42
     %7:gpr(s32) = G_ICMP intpred(ugt), %3(s32), %8
-    %4:gpr(s1) = G_TRUNC %7(s32)
-    %5:gpr(s32) = G_SELECT %4(s1), %6, %2
+    %5:gpr(s32) = G_SELECT %7, %6, %2
     $w0 = COPY %5(s32)
     RET_ReallyLR implicit $w0
 
@@ -521,8 +509,7 @@ body:             |
     %shift:gpr(s64) = G_SHL %copy(s64), %cst(s64)
     %and:gpr(s64) = G_AND %zero, %shift
     %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    %select:gpr(s64) = G_SELECT %cmp_trunc(s1), %one, %zero
+    %select:gpr(s64) = G_SELECT %cmp, %one, %zero
     $x0 = COPY %select(s64)
     RET_ReallyLR implicit $x0
 
@@ -553,8 +540,7 @@ body:             |
     %shift:gpr(s32) = G_SHL %copy(s32), %cst(s32)
     %and:gpr(s32) = G_AND %zero, %shift
     %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s32), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    %select:gpr(s32) = G_SELECT %cmp_trunc(s1), %one, %zero
+    %select:gpr(s32) = G_SELECT %cmp, %one, %zero
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir
index ba859daa7609..85c2b61aa674 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-ext-tbz-tbnz.mir
@@ -31,8 +31,7 @@ body:             |
     %fold_me:gpr(s64) = G_ZEXT %copy(s32)
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -64,8 +63,7 @@ body:             |
     %fold_me:gpr(s64) = G_ANYEXT %copy(s32)
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -100,8 +98,7 @@ body:             |
     %ext2:gpr(s64) = G_ANYEXT %ext1(s32)
     %and:gpr(s64) = G_AND %ext2, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -138,8 +135,7 @@ body:             |
     %zext:gpr(s64) = G_ZEXT %copy(s32)
     %and:gpr(s64) = G_AND %zext, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     $x0 = COPY %zext:gpr(s64)

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir
index 62b8edcb1886..b32508ed12c6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-shift-tbz-tbnz.mir
@@ -35,8 +35,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -74,8 +73,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -114,8 +112,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -155,8 +152,7 @@ body:             |
     %shl:gpr(s64) = G_SHL %copy, %fold_cst
     %and:gpr(s64) = G_AND %shl, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
 
   bb.1:
@@ -198,8 +194,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -235,8 +230,7 @@ body:             |
 
     %and:gpr(s32) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s32), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -272,8 +266,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -309,8 +302,7 @@ body:             |
 
     %and:gpr(s32) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s32), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -347,8 +339,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -387,8 +378,7 @@ body:             |
 
     %and:gpr(s32) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s32), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -425,8 +415,7 @@ body:             |
 
     %and:gpr(s32) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s32), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
index 1e518717e510..bcda5c98b8cd 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-trunc-tbz-tbnz.mir
@@ -28,8 +28,7 @@ body:             |
     %fold_me:gpr(s32) = G_TRUNC %copy(s64)
     %and:gpr(s32) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s32), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-xor-tbz-tbnz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-xor-tbz-tbnz.mir
index 97a867c6a114..ed24193b5826 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-xor-tbz-tbnz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-xor-tbz-tbnz.mir
@@ -38,8 +38,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -76,8 +75,7 @@ body:             |
     %fold_me:gpr(s64) = G_XOR %copy, %fold_cst
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -119,8 +117,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -153,8 +150,7 @@ body:             |
     %5:gpr(s64) = G_ZEXT %4(s32)
     %15:gpr(s64) = G_CONSTANT i64 0
     %13:gpr(s32) = G_ICMP intpred(slt), %5(s64), %15
-    %7:gpr(s1) = G_TRUNC %13(s32)
-    G_BRCOND %7(s1), %bb.1
+    G_BRCOND %13, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -191,8 +187,7 @@ body:             |
     %fold_me:gpr(s64) = G_XOR %fold_cst, %copy
     %and:gpr(s64) = G_AND %fold_me, %bit
     %cmp:gpr(s32) = G_ICMP intpred(ne), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -230,8 +225,7 @@ body:             |
 
     %and:gpr(s64) = G_AND %xor2, %bit
     %cmp:gpr(s32) = G_ICMP intpred(eq), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
index 0dd72599555c..523eb0c4bc0e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-adjust-icmp-imm.mir
@@ -657,8 +657,7 @@ body:             |
     ; LOWER-NEXT: %reg1:_(s32) = COPY $w1
     ; LOWER-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %reg0(s32), [[C]]
-    ; LOWER-NEXT: %trunc:_(s1) = G_TRUNC %cmp(s32)
-    ; LOWER-NEXT: %select:_(s32) = G_SELECT %trunc(s1), %reg0, %reg1
+    ; LOWER-NEXT: %select:_(s32) = G_SELECT %cmp(s32), %reg0, %reg1
     ; LOWER-NEXT: $w0 = COPY %select(s32)
     ; LOWER-NEXT: RET_ReallyLR implicit $w0
     ; SELECT-LABEL: name: cmp_and_select
@@ -674,8 +673,7 @@ body:             |
     %reg1:_(s32) = COPY $w1
     %cst:_(s32) = G_CONSTANT i32 -1
     %cmp:_(s32) = G_ICMP intpred(sle), %reg0(s32), %cst
-    %trunc:_(s1) = G_TRUNC %cmp(s32)
-    %select:_(s32) = G_SELECT %trunc(s1), %reg0, %reg1
+    %select:_(s32) = G_SELECT %cmp, %reg0, %reg1
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir b/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
index 0c97233da57b..61a613551b07 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/preselect-process-phis.mir
@@ -39,7 +39,7 @@ body:             |
   bb.0:
     successors: %bb.1(0x80000000)
 
-    %0:gpr(s1) = G_IMPLICIT_DEF
+    %0:gpr(s32) = G_IMPLICIT_DEF
     %4:gpr(p0) = G_IMPLICIT_DEF
     %8:fpr(s32) = G_FCONSTANT float 0x7FF8000000000000
 
@@ -47,7 +47,7 @@ body:             |
     successors: %bb.2(0x80000000)
 
     %6:gpr(s32) = G_IMPLICIT_DEF
-    %7:gpr(s32) = G_SELECT %0(s1), %6, %6
+    %7:gpr(s32) = G_SELECT %0(s32), %6, %6
     %1:gpr(s16) = G_TRUNC %7(s32)
 
   bb.2:
@@ -98,7 +98,7 @@ body:             |
   bb.0:
     successors: %bb.1(0x80000000)
 
-    %0:gpr(s1) = G_IMPLICIT_DEF
+    %0:gpr(s32) = G_IMPLICIT_DEF
     %4:gpr(p0) = G_IMPLICIT_DEF
     %8:fpr(s32) = G_FCONSTANT float 0x7FF8000000000000
 
@@ -106,7 +106,7 @@ body:             |
     successors: %bb.2(0x80000000)
 
     %6:gpr(s32) = G_IMPLICIT_DEF
-    %7:gpr(s32) = G_SELECT %0(s1), %6, %6
+    %7:gpr(s32) = G_SELECT %0(s32), %6, %6
     %1:gpr(s16) = G_TRUNC %7(s32)
 
   bb.2:
@@ -183,14 +183,14 @@ body:             |
     successors: %bb.2, %bb.6
     liveins: $w0, $w1, $x2
     %ptr:gpr(p0) = COPY $x2
-    %cond_1:gpr(s1) = G_IMPLICIT_DEF
+    %cond_1:gpr(s32) = G_IMPLICIT_DEF
     %gpr_1:gpr(s16) = G_IMPLICIT_DEF
-    G_BRCOND %cond_1(s1), %bb.6
+    G_BRCOND %cond_1(s32), %bb.6
     G_BR %bb.2
   bb.2:
     successors: %bb.3, %bb.4
-    %cond_2:gpr(s1) = G_IMPLICIT_DEF
-    G_BRCOND %cond_2(s1), %bb.4
+    %cond_2:gpr(s32) = G_IMPLICIT_DEF
+    G_BRCOND %cond_2(s32), %bb.4
     G_BR %bb.3
   bb.3:
     %gpr_2:gpr(s16) = G_IMPLICIT_DEF

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-assert-sext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-assert-sext.mir
index b35f45a7bb56..496e01e8f39c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-assert-sext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-assert-sext.mir
@@ -166,20 +166,18 @@ body:             |
     ; CHECK: liveins: $d0, $x1, $w0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %w0:gpr(s32) = COPY $w0
-    ; CHECK-NEXT: %cond:gpr(s1) = G_TRUNC %w0(s32)
     ; CHECK-NEXT: %fpr:fpr(s64) = COPY $d0
     ; CHECK-NEXT: %fpr_assert_sext:fpr(s64) = G_ASSERT_SEXT %fpr, 32
     ; CHECK-NEXT: %gpr:gpr(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY %gpr(s64)
-    ; CHECK-NEXT: %select:fpr(s64) = G_SELECT %cond(s1), %fpr_assert_sext, [[COPY]]
+    ; CHECK-NEXT: %select:fpr(s64) = G_SELECT %w0(s32), %fpr_assert_sext, [[COPY]]
     ; CHECK-NEXT: $d0 = COPY %select(s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %w0:_(s32) = COPY $w0
-    %cond:_(s1) = G_TRUNC %w0(s32)
     %fpr:_(s64) = COPY $d0
     %fpr_assert_sext:_(s64) = G_ASSERT_SEXT %fpr, 32
     %gpr:_(s64) = COPY $x1
-    %select:_(s64) = G_SELECT %cond(s1), %fpr_assert_sext, %gpr
+    %select:_(s64) = G_SELECT %w0, %fpr_assert_sext, %gpr
     $d0 = COPY %select(s64)
     RET_ReallyLR implicit $d0
 
@@ -200,8 +198,7 @@ body:             |
   ; CHECK-NEXT:   %copy_assert_sext:fpr(s32) = G_ASSERT_SEXT %copy1, 16
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr(s32) = COPY %copy1(s32)
   ; CHECK-NEXT:   %cmp:gpr(s32) = G_ICMP intpred(eq), [[COPY]](s32), %copy2
-  ; CHECK-NEXT:   %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-  ; CHECK-NEXT:   G_BRCOND %cmp_trunc(s1), %bb.1
+  ; CHECK-NEXT:   G_BRCOND %cmp(s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -225,8 +222,7 @@ body:             |
     %copy_assert_sext:_(s32) = G_ASSERT_SEXT %copy1(s32), 16
 
     %cmp:_(s32) = G_ICMP intpred(eq), %copy1, %copy2
-    %cmp_trunc:_(s1) = G_TRUNC %cmp
-    G_BRCOND %cmp_trunc, %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.1
   bb.1:
     successors: %bb.2
@@ -255,8 +251,7 @@ body:             |
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr(s32) = COPY %copy1(s32)
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr(s32) = COPY %copy2(s32)
   ; CHECK-NEXT:   %cmp:gpr(s32) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
-  ; CHECK-NEXT:   %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-  ; CHECK-NEXT:   G_BRCOND %cmp_trunc(s1), %bb.1
+  ; CHECK-NEXT:   G_BRCOND %cmp(s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -277,8 +272,7 @@ body:             |
     %copy1:_(s32) = COPY $s0
     %copy2:_(s32) = COPY $s1
     %cmp:_(s32) = G_ICMP intpred(eq), %copy1, %copy2
-    %cmp_trunc:_(s1) = G_TRUNC %cmp
-    G_BRCOND %cmp_trunc, %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.1
   bb.1:
     successors: %bb.2

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-assert-zext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-assert-zext.mir
index a26a9b1f9eb5..06c221adee12 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-assert-zext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-assert-zext.mir
@@ -166,20 +166,18 @@ body:             |
     ; CHECK: liveins: $d0, $x1, $w0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %w0:gpr(s32) = COPY $w0
-    ; CHECK-NEXT: %cond:gpr(s1) = G_TRUNC %w0(s32)
     ; CHECK-NEXT: %fpr:fpr(s64) = COPY $d0
     ; CHECK-NEXT: %fpr_assert_zext:fpr(s64) = G_ASSERT_ZEXT %fpr, 32
     ; CHECK-NEXT: %gpr:gpr(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:fpr(s64) = COPY %gpr(s64)
-    ; CHECK-NEXT: %select:fpr(s64) = G_SELECT %cond(s1), %fpr_assert_zext, [[COPY]]
+    ; CHECK-NEXT: %select:fpr(s64) = G_SELECT %w0(s32), %fpr_assert_zext, [[COPY]]
     ; CHECK-NEXT: $d0 = COPY %select(s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %w0:_(s32) = COPY $w0
-    %cond:_(s1) = G_TRUNC %w0(s32)
     %fpr:_(s64) = COPY $d0
     %fpr_assert_zext:_(s64) = G_ASSERT_ZEXT %fpr, 32
     %gpr:_(s64) = COPY $x1
-    %select:_(s64) = G_SELECT %cond(s1), %fpr_assert_zext, %gpr
+    %select:_(s64) = G_SELECT %w0, %fpr_assert_zext, %gpr
     $d0 = COPY %select(s64)
     RET_ReallyLR implicit $d0
 
@@ -200,8 +198,7 @@ body:             |
   ; CHECK-NEXT:   %copy_assert_zext:fpr(s32) = G_ASSERT_ZEXT %copy1, 16
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr(s32) = COPY %copy1(s32)
   ; CHECK-NEXT:   %cmp:gpr(s32) = G_ICMP intpred(eq), [[COPY]](s32), %copy2
-  ; CHECK-NEXT:   %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-  ; CHECK-NEXT:   G_BRCOND %cmp_trunc(s1), %bb.1
+  ; CHECK-NEXT:   G_BRCOND %cmp(s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -225,8 +222,7 @@ body:             |
     %copy_assert_zext:_(s32) = G_ASSERT_ZEXT %copy1(s32), 16
 
     %cmp:_(s32) = G_ICMP intpred(eq), %copy1, %copy2
-    %cmp_trunc:_(s1) = G_TRUNC %cmp
-    G_BRCOND %cmp_trunc, %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.1
   bb.1:
     successors: %bb.2
@@ -255,8 +251,7 @@ body:             |
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr(s32) = COPY %copy1(s32)
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr(s32) = COPY %copy2(s32)
   ; CHECK-NEXT:   %cmp:gpr(s32) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
-  ; CHECK-NEXT:   %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-  ; CHECK-NEXT:   G_BRCOND %cmp_trunc(s1), %bb.1
+  ; CHECK-NEXT:   G_BRCOND %cmp(s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -277,8 +272,7 @@ body:             |
     %copy1:_(s32) = COPY $s0
     %copy2:_(s32) = COPY $s1
     %cmp:_(s32) = G_ICMP intpred(eq), %copy1, %copy2
-    %cmp_trunc:_(s1) = G_TRUNC %cmp
-    G_BRCOND %cmp_trunc, %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.1
   bb.1:
     successors: %bb.2

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fp-use-def.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fp-use-def.mir
index 9f5a57b09301..eb768f3b16da 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fp-use-def.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-fp-use-def.mir
@@ -91,19 +91,17 @@ body:             |
     ; CHECK: liveins: $w0, $w1, $w2
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC %2(s32)
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $w1
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr(s32) = COPY $w2
     ; CHECK-NEXT: [[SITOFP:%[0-9]+]]:fpr(s32) = G_SITOFP [[COPY1]](s32)
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fpr(s32) = COPY [[COPY2]](s32)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:fpr(s32) = G_SELECT [[TRUNC]](s1), [[COPY3]], [[SITOFP]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:fpr(s32) = G_SELECT [[COPY2]](s32), [[COPY3]], [[SITOFP]]
     ; CHECK-NEXT: [[FPTOSI:%[0-9]+]]:gpr(s32) = G_FPTOSI [[SELECT]](s32)
     %0:_(s32) = COPY $w0
-    %1:_(s1) = G_TRUNC %3(s32)
     %2:_(s32) = COPY $w1
     %3:_(s32) = COPY $w2
     %4:_(s32) = G_SITOFP %2
-    %6:_(s32) = G_SELECT %1(s1), %3, %4
+    %6:_(s32) = G_SELECT %3, %3, %4
     %8:_(s32) = G_FPTOSI %6
 
 ...
@@ -118,10 +116,9 @@ body:             |
   ; CHECK-NEXT:   liveins: $x0, $s0, $s1, $w0, $w1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cond_wide:gpr(s32) = COPY $w0
-  ; CHECK-NEXT:   %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
   ; CHECK-NEXT:   %fpr_copy:fpr(s32) = COPY $s0
   ; CHECK-NEXT:   %ptr:gpr(p0) = COPY $x0
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.1
+  ; CHECK-NEXT:   G_BRCOND %cond_wide(s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -138,10 +135,9 @@ body:             |
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: $x0, $s0, $s1, $w0, $w1
     %cond_wide:_(s32) = COPY $w0
-    %cond:_(s1) = G_TRUNC %cond_wide(s32)
     %fpr_copy:_(s32) = COPY $s0
     %ptr:_(p0) = COPY $x0
-    G_BRCOND %cond(s1), %bb.1
+    G_BRCOND %cond_wide, %bb.1
     G_BR %bb.2
   bb.1:
     successors: %bb.2
@@ -164,10 +160,9 @@ body:             |
   ; CHECK-NEXT:   liveins: $x0, $s0, $s1, $w0, $w1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cond_wide:gpr(s32) = COPY $w0
-  ; CHECK-NEXT:   %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
   ; CHECK-NEXT:   %gpr_copy:gpr(s32) = COPY $w1
   ; CHECK-NEXT:   %ptr:gpr(p0) = COPY $x0
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.1
+  ; CHECK-NEXT:   G_BRCOND %cond_wide(s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -184,10 +179,9 @@ body:             |
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: $x0, $s0, $s1, $w0, $w1
     %cond_wide:_(s32) = COPY $w0
-    %cond:_(s1) = G_TRUNC %cond_wide(s32)
     %gpr_copy:_(s32) = COPY $w1
     %ptr:_(p0) = COPY $x0
-    G_BRCOND %cond(s1), %bb.1
+    G_BRCOND %cond_wide, %bb.1
     G_BR %bb.2
   bb.1:
     successors: %bb.2
@@ -210,17 +204,16 @@ body:             |
   ; CHECK-NEXT:   liveins: $s0, $s1, $w0, $w1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cond_wide:gpr(s32) = COPY $w0
-  ; CHECK-NEXT:   %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
   ; CHECK-NEXT:   %fpr_copy:fpr(s32) = COPY $s0
   ; CHECK-NEXT:   %gpr_copy:gpr(s32) = COPY $w1
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.1
+  ; CHECK-NEXT:   G_BRCOND %cond_wide(s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:fpr(s32) = COPY %gpr_copy(s32)
-  ; CHECK-NEXT:   %select:fpr(s32) = G_SELECT %cond(s1), %fpr_copy, [[COPY]]
+  ; CHECK-NEXT:   %select:fpr(s32) = G_SELECT %cond_wide(s32), %fpr_copy, [[COPY]]
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
@@ -233,14 +226,13 @@ body:             |
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: $s0, $s1, $w0, $w1
     %cond_wide:_(s32) = COPY $w0
-    %cond:_(s1) = G_TRUNC %cond_wide(s32)
     %fpr_copy:_(s32) = COPY $s0
     %gpr_copy:_(s32) = COPY $w1
-    G_BRCOND %cond(s1), %bb.1
+    G_BRCOND %cond_wide, %bb.1
     G_BR %bb.2
   bb.1:
     successors: %bb.2
-    %select:_(s32) = G_SELECT %cond(s1), %fpr_copy, %gpr_copy
+    %select:_(s32) = G_SELECT %cond_wide, %fpr_copy, %gpr_copy
     G_BR %bb.2
   bb.2:
     %phi:_(s32) = G_PHI %fpr_copy(s32), %bb.0, %select(s32), %bb.1
@@ -259,17 +251,16 @@ body:             |
   ; CHECK-NEXT:   liveins: $s0, $s1, $w0, $w1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cond_wide:gpr(s32) = COPY $w0
-  ; CHECK-NEXT:   %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
   ; CHECK-NEXT:   %fpr_copy:fpr(s32) = COPY $s0
   ; CHECK-NEXT:   %gpr_copy:gpr(s32) = COPY $w1
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.1
+  ; CHECK-NEXT:   G_BRCOND %cond_wide(s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr(s32) = COPY %fpr_copy(s32)
-  ; CHECK-NEXT:   %select:gpr(s32) = G_SELECT %cond(s1), [[COPY]], %gpr_copy
+  ; CHECK-NEXT:   %select:gpr(s32) = G_SELECT %cond_wide(s32), [[COPY]], %gpr_copy
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2:
@@ -282,14 +273,13 @@ body:             |
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: $s0, $s1, $w0, $w1
     %cond_wide:_(s32) = COPY $w0
-    %cond:_(s1) = G_TRUNC %cond_wide(s32)
     %fpr_copy:_(s32) = COPY $s0
     %gpr_copy:_(s32) = COPY $w1
-    G_BRCOND %cond(s1), %bb.1
+    G_BRCOND %cond_wide, %bb.1
     G_BR %bb.2
   bb.1:
     successors: %bb.2
-    %select:_(s32) = G_SELECT %cond(s1), %fpr_copy, %gpr_copy
+    %select:_(s32) = G_SELECT %cond_wide, %fpr_copy, %gpr_copy
     G_BR %bb.2
   bb.2:
     %phi:_(s32) = G_PHI %gpr_copy(s32), %bb.0, %select(s32), %bb.1
@@ -309,10 +299,9 @@ body:             |
   ; CHECK-NEXT:   liveins: $x0, $s0, $s1, $w0, $w1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cond_wide:gpr(s32) = COPY $w0
-  ; CHECK-NEXT:   %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
   ; CHECK-NEXT:   %fpr_copy:fpr(s32) = COPY $s0
   ; CHECK-NEXT:   %unmerge_src:gpr(s64) = COPY $x0
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.1
+  ; CHECK-NEXT:   G_BRCOND %cond_wide(s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -330,10 +319,10 @@ body:             |
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: $x0, $s0, $s1, $w0, $w1
     %cond_wide:_(s32) = COPY $w0
-    %cond:_(s1) = G_TRUNC %cond_wide(s32)
+
     %fpr_copy:_(s32) = COPY $s0
     %unmerge_src:_(s64) = COPY $x0
-    G_BRCOND %cond(s1), %bb.1
+    G_BRCOND %cond_wide, %bb.1
     G_BR %bb.2
   bb.1:
     successors: %bb.2
@@ -356,10 +345,9 @@ body:             |
   ; CHECK-NEXT:   liveins: $x0, $s0, $s1, $w0, $w1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   %cond_wide:gpr(s32) = COPY $w0
-  ; CHECK-NEXT:   %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
   ; CHECK-NEXT:   %gpr_copy:gpr(s32) = COPY $w1
   ; CHECK-NEXT:   %unmerge_src:gpr(s64) = COPY $x0
-  ; CHECK-NEXT:   G_BRCOND %cond(s1), %bb.1
+  ; CHECK-NEXT:   G_BRCOND %cond_wide(s32), %bb.1
   ; CHECK-NEXT:   G_BR %bb.2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
@@ -376,10 +364,9 @@ body:             |
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
     liveins: $x0, $s0, $s1, $w0, $w1
     %cond_wide:_(s32) = COPY $w0
-    %cond:_(s1) = G_TRUNC %cond_wide(s32)
     %gpr_copy:_(s32) = COPY $w1
     %unmerge_src:_(s64) = COPY $x0
-    G_BRCOND %cond(s1), %bb.1
+    G_BRCOND %cond_wide, %bb.1
     G_BR %bb.2
   bb.1:
     successors: %bb.2

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-select.mir
index 40ce8060fd82..e700c834a93c 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbank-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbank-select.mir
@@ -16,17 +16,15 @@ body:             |
     ; CHECK: liveins: $s0, $s1, $w0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[COPY]](s32)
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s32) = COPY $s0
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr(s32) = COPY $s1
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:fpr(s32) = G_SELECT [[TRUNC]](s1), [[COPY1]], [[COPY2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:fpr(s32) = G_SELECT [[COPY]](s32), [[COPY1]], [[COPY2]]
     ; CHECK-NEXT: $s0 = COPY [[SELECT]](s32)
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
     %3:_(s32) = COPY $w0
-    %0:_(s1) = G_TRUNC %3(s32)
     %1:_(s32) = COPY $s0
     %2:_(s32) = COPY $s1
-    %4:_(s32) = G_SELECT %0(s1), %1, %2
+    %4:_(s32) = G_SELECT %3, %1, %2
     $s0 = COPY %4(s32)
     RET_ReallyLR implicit $s0
 
@@ -45,17 +43,15 @@ body:             |
     ; CHECK: liveins: $d0, $d1, $w0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[COPY]](s32)
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s64) = COPY $d0
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr(s64) = COPY $d1
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:fpr(s64) = G_SELECT [[TRUNC]](s1), [[COPY1]], [[COPY2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:fpr(s64) = G_SELECT [[COPY]](s32), [[COPY1]], [[COPY2]]
     ; CHECK-NEXT: $d0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %3:_(s32) = COPY $w0
-    %0:_(s1) = G_TRUNC %3(s32)
     %1:_(s64) = COPY $d0
     %2:_(s64) = COPY $d1
-    %4:_(s64) = G_SELECT %0(s1), %1, %2
+    %4:_(s64) = G_SELECT %3, %1, %2
     $d0 = COPY %4(s64)
     RET_ReallyLR implicit $d0
 
@@ -80,17 +76,15 @@ body:             |
     ; CHECK: liveins: $d0, $d1, $w0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[COPY]](s32)
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s64) = COPY $d0
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr(s64) = COPY $d1
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:fpr(s64) = G_SELECT [[TRUNC]](s1), [[COPY1]], [[COPY2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:fpr(s64) = G_SELECT [[COPY]](s32), [[COPY1]], [[COPY2]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %3:_(s32) = COPY $w0
-    %0:_(s1) = G_TRUNC %3(s32)
     %1:_(s64) = COPY $d0
     %2:_(s64) = COPY $d1
-    %4:_(s64) = G_SELECT %0(s1), %1, %2
+    %4:_(s64) = G_SELECT %3, %1, %2
     $x0 = COPY %4(s64)
     RET_ReallyLR implicit $x0
 
@@ -112,18 +106,16 @@ body:             |
     ; CHECK: liveins: $d0, $x1, $w0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[COPY]](s32)
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s64) = COPY $d0
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fpr(s64) = COPY [[COPY2]](s64)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:fpr(s64) = G_SELECT [[TRUNC]](s1), [[COPY1]], [[COPY3]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:fpr(s64) = G_SELECT [[COPY]](s32), [[COPY1]], [[COPY3]]
     ; CHECK-NEXT: $d0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %3:_(s32) = COPY $w0
-    %0:_(s1) = G_TRUNC %3(s32)
     %1:_(s64) = COPY $d0
     %2:_(s64) = COPY $x1
-    %4:_(s64) = G_SELECT %0(s1), %1, %2
+    %4:_(s64) = G_SELECT %3, %1, %2
     $d0 = COPY %4(s64)
     RET_ReallyLR implicit $d0
 
@@ -145,18 +137,16 @@ body:             |
     ; CHECK: liveins: $d0, $x1, $w0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[COPY]](s32)
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr(s64) = COPY $d0
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr(s64) = COPY $x1
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gpr(s64) = COPY [[COPY1]](s64)
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:gpr(s64) = G_SELECT [[TRUNC]](s1), [[COPY3]], [[COPY2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:gpr(s64) = G_SELECT [[COPY]](s32), [[COPY3]], [[COPY2]]
     ; CHECK-NEXT: $x0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %3:_(s32) = COPY $w0
-    %0:_(s1) = G_TRUNC %3(s32)
     %1:_(s64) = COPY $d0
     %2:_(s64) = COPY $x1
-    %4:_(s64) = G_SELECT %0(s1), %1, %2
+    %4:_(s64) = G_SELECT %3, %1, %2
     $x0 = COPY %4(s64)
     RET_ReallyLR implicit $x0
 
@@ -177,16 +167,14 @@ body:             |
     ; CHECK: liveins: $x0, $x1, $w0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
-    ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[COPY]](s32)
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $x0
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr(s64) = COPY $x1
-    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:gpr(s64) = G_SELECT [[TRUNC]](s1), [[COPY1]], [[COPY2]]
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:gpr(s64) = G_SELECT [[COPY]](s32), [[COPY1]], [[COPY2]]
     ; CHECK-NEXT: $d0 = COPY [[SELECT]](s64)
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %3:_(s32) = COPY $w0
-    %0:_(s1) = G_TRUNC %3(s32)
     %1:_(s64) = COPY $x0
     %2:_(s64) = COPY $x1
-    %4:_(s64) = G_SELECT %0(s1), %1, %2
+    %4:_(s64) = G_SELECT %3, %1, %2
     $d0 = COPY %4(s64)
     RET_ReallyLR implicit $d0

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
index 2ecde8b582b5..8d79c4412de6 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir
@@ -860,8 +860,7 @@ body:             |
   ; CHECK:   [[COPY:%[0-9]+]]:gpr(p0) = COPY $x0
   ; CHECK:   [[COPY1:%[0-9]+]]:gpr(p0) = COPY $x1
   ; CHECK:   [[COPY2:%[0-9]+]]:gpr(s32) = COPY $w2
-  ; CHECK:   [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[COPY2]](s32)
-  ; CHECK:   G_BRCOND [[TRUNC]](s1), %bb.1
+  ; CHECK:   G_BRCOND [[COPY2]](s32), %bb.1
   ; CHECK:   G_BR %bb.2
   ; CHECK: bb.1:
   ; CHECK:   successors: %bb.2(0x80000000)
@@ -876,8 +875,7 @@ body:             |
     %0(p0) = COPY $x0
     %1(p0) = COPY $x1
     %4(s32) = COPY $w2
-    %2(s1) = G_TRUNC %4(s32)
-    G_BRCOND %2(s1), %bb.1
+    G_BRCOND %4, %bb.1
     G_BR %bb.2
 
   bb.1:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-br.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-br.mir
index 03f16f784537..e47411b7b178 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-br.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-br.mir
@@ -43,8 +43,7 @@ body:             |
   bb.0:
     successors: %bb.0, %bb.1
     %1(s32) = COPY $w0
-    %0(s1) = G_TRUNC %1
-    G_BRCOND %0(s1), %bb.1
+    G_BRCOND %1, %bb.1
     G_BR %bb.0
 
   bb.1:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-binop.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-binop.mir
index 663a7e96130a..4cf3c5769dda 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-binop.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-brcond-of-binop.mir
@@ -30,8 +30,7 @@ body:             |
     %4:gpr(s32) = G_ANYEXT %8(s8)
     %5:gpr(s32) = G_CONSTANT i32 1
     %6:gpr(s32) = G_XOR %4, %5
-    %3:gpr(s1) = G_TRUNC %6(s32)
-    G_BRCOND %3(s1), %bb.3
+    G_BRCOND %6, %bb.3
 
   bb.2:
     RET_ReallyLR
@@ -73,8 +72,8 @@ body:             |
     %4:gpr(s64) = G_ANYEXT %8(s8)
     %5:gpr(s64) = G_CONSTANT i64 1
     %6:gpr(s64) = G_XOR %4, %5
-    %3:gpr(s1) = G_TRUNC %6(s64)
-    G_BRCOND %3(s1), %bb.3
+    %3:gpr(s32) = G_TRUNC %6(s64)
+    G_BRCOND %3(s32), %bb.3
 
   bb.2:
     RET_ReallyLR
@@ -107,8 +106,7 @@ body:             |
     %lhs:gpr(s32) = COPY $w0
     %rhs:gpr(s32) = G_CONSTANT i32 1
     %op:gpr(s32) = G_AND %lhs, %rhs
-    %trunc:gpr(s1) = G_TRUNC %op(s32)
-    G_BRCOND %trunc(s1), %bb.3
+    G_BRCOND %op, %bb.3
 
   bb.2:
     RET_ReallyLR
@@ -143,8 +141,7 @@ body:             |
     %lhs:gpr(s32) = COPY $w0
     %rhs:gpr(s32) = COPY $w1
     %op:gpr(s32) = G_AND %lhs, %rhs
-    %trunc:gpr(s1) = G_TRUNC %op(s32)
-    G_BRCOND %trunc(s1), %bb.3
+    G_BRCOND %op, %bb.3
 
   bb.2:
     RET_ReallyLR
@@ -182,8 +179,7 @@ body:             |
     %lhs:gpr(s32) = COPY $w0
     %rhs:gpr(s32) = G_CONSTANT i32 1
     %op:gpr(s32) = G_SHL %lhs, %rhs
-    %trunc:gpr(s1) = G_TRUNC %op(s32)
-    G_BRCOND %trunc(s1), %bb.3
+    G_BRCOND %op, %bb.3
 
   bb.2:
     RET_ReallyLR
@@ -220,8 +216,7 @@ body:             |
     %lhs:gpr(s32) = COPY $w0
     %rhs:gpr(s32) = G_CONSTANT i32 1
     %op:gpr(s32) = G_ASHR %lhs, %rhs
-    %trunc:gpr(s1) = G_TRUNC %op(s32)
-    G_BRCOND %trunc(s1), %bb.3
+    G_BRCOND %op, %bb.3
 
   bb.2:
     RET_ReallyLR
@@ -254,8 +249,8 @@ body:             |
     %lhs:gpr(s64) = COPY $x0
     %rhs:gpr(s64) = G_CONSTANT i64 8589934592
     %op:gpr(s64) = G_ASHR %lhs, %rhs
-    %trunc:gpr(s1) = G_TRUNC %op(s64)
-    G_BRCOND %trunc(s1), %bb.3
+    %trunc:gpr(s32) = G_TRUNC %op(s64)
+    G_BRCOND %trunc, %bb.3
   bb.2:
     RET_ReallyLR
   bb.3:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
index 5b38abd87e44..0da29c9d0c11 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-cbz.mir
@@ -24,8 +24,7 @@ body:             |
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = G_CONSTANT i32 0
     %2:gpr(s32) = G_ICMP intpred(eq), %0, %1
-    %3:gpr(s1) = G_TRUNC %2(s32)
-    G_BRCOND %3(s1), %bb.1
+    G_BRCOND %2, %bb.1
     G_BR %bb.0
 
   bb.1:
@@ -54,8 +53,7 @@ body:             |
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 0
     %2:gpr(s32) = G_ICMP intpred(eq), %0, %1
-    %3:gpr(s1) = G_TRUNC %2(s32)
-    G_BRCOND %3(s1), %bb.1
+    G_BRCOND %2, %bb.1
     G_BR %bb.0
 
   bb.1:
@@ -84,8 +82,7 @@ body:             |
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = G_CONSTANT i32 0
     %2:gpr(s32) = G_ICMP intpred(ne), %0, %1
-    %3:gpr(s1) = G_TRUNC %2(s32)
-    G_BRCOND %3(s1), %bb.1
+    G_BRCOND %2, %bb.1
     G_BR %bb.0
 
   bb.1:
@@ -114,8 +111,7 @@ body:             |
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 0
     %2:gpr(s32) = G_ICMP intpred(ne), %0, %1
-    %3:gpr(s1) = G_TRUNC %2(s32)
-    G_BRCOND %3(s1), %bb.1
+    G_BRCOND %2, %bb.1
     G_BR %bb.0
 
   bb.1:
@@ -150,8 +146,7 @@ body:             |
     %2:gpr(s64) = G_CONSTANT i64 0
     %1:gpr(p0) = G_INTTOPTR %2(s64)
     %4:gpr(s32) = G_ICMP intpred(eq), %0(p0), %1
-    %3:gpr(s1) = G_TRUNC %4(s32)
-    G_BRCOND %3(s1), %bb.3
+    G_BRCOND %4, %bb.3
 
   bb.2:
     %5:gpr(s64) = G_CONSTANT i64 0
@@ -194,8 +189,7 @@ body:             |
     %4:gpr(s64) = G_CONSTANT i64 0
     %1:gpr(s64) = G_LOAD %0(p0) :: (load (s64))
     %5:gpr(s32) = G_ICMP intpred(eq), %1(s64), %2
-    %3:gpr(s1) = G_TRUNC %5(s32)
-    G_BRCOND %3(s1), %bb.3
+    G_BRCOND %5, %bb.3
 
   bb.2:
     %6:gpr(s64) = G_CONSTANT i64 0

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir
index 3cdbc4f3a2a8..d0230e4adeca 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-int-ptr-casts.mir
@@ -9,7 +9,6 @@
   define void @ptrtoint_s32_p0() { ret void }
   define void @ptrtoint_s16_p0() { ret void }
   define void @ptrtoint_s8_p0() { ret void }
-  define void @ptrtoint_s1_p0() { ret void }
   define void @inttoptr_v2p0_v2s64() { ret void }
   define void @ptrtoint_v2s64_v2p0() { ret void }
 ...
@@ -116,28 +115,6 @@ body:             |
     $w0 = COPY %2(s32)
 ...
 
----
-name:            ptrtoint_s1_p0
-legalized:       true
-regBankSelected: true
-
-registers:
-  - { id: 0, class: gpr }
-  - { id: 1, class: gpr }
-body:             |
-  bb.0:
-    liveins: $x0
-    ; CHECK-LABEL: name: ptrtoint_s1_p0
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]].sub_32
-    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr32all = COPY [[COPY1]]
-    ; CHECK-NEXT: $w0 = COPY [[COPY2]]
-    %0(p0) = COPY $x0
-    %1(s1) = G_PTRTOINT %0
-    %2:gpr(s32) = G_ANYEXT %1
-    $w0 = COPY %2(s32)
-...
-
 ---
 name:            inttoptr_v2p0_v2s64
 legalized:       true

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir
index 12b3a23d926b..ee3eacf51f4f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt.mir
@@ -69,8 +69,8 @@ body:             |
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr
   ; CHECK-NEXT:   [[MOVaddrJT:%[0-9]+]]:gpr64common = MOVaddrJT target-flags(aarch64-page) %jump-table.0, target-flags(aarch64-pageoff, aarch64-nc) %jump-table.0
-  ; CHECK-NEXT:   early-clobber %18:gpr64, early-clobber %19:gpr64sp = JumpTableDest32 [[MOVaddrJT]], [[SUBREG_TO_REG]], %jump-table.0
-  ; CHECK-NEXT:   BR %18
+  ; CHECK-NEXT:   early-clobber %17:gpr64, early-clobber %18:gpr64sp = JumpTableDest32 [[MOVaddrJT]], [[SUBREG_TO_REG]], %jump-table.0
+  ; CHECK-NEXT:   BR %17
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.2.sw.bb:
   ; CHECK-NEXT:   successors: %bb.4(0x80000000)
@@ -101,8 +101,7 @@ body:             |
     %3:gpr(s64) = G_ZEXT %2(s32)
     %5:gpr(s64) = G_ZEXT %4(s32)
     %14:gpr(s32) = G_ICMP intpred(ugt), %3(s64), %5
-    %6:gpr(s1) = G_TRUNC %14(s32)
-    G_BRCOND %6(s1), %bb.4
+    G_BRCOND %14, %bb.4
 
   bb.5.entry:
     successors: %bb.3, %bb.4, %bb.2

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-phi.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-phi.mir
index bd001649bfb4..84fc03637333 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-phi.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-phi.mir
@@ -60,8 +60,7 @@ body:             |
     %3(s32) = G_CONSTANT i32 1
     %5(s32) = G_CONSTANT i32 2
     %8(s32) = G_ICMP intpred(ugt), %0(s32), %1
-    %2(s1) = G_TRUNC %8(s32)
-    G_BRCOND %2(s1), %bb.2.case1
+    G_BRCOND %8, %bb.2.case1
     G_BR %bb.3.case2
 
   bb.2.case1:
@@ -106,9 +105,8 @@ body:             |
 
     %0(p0) = COPY $x0
     %1(p0) = COPY $x1
-    %6:gpr(s32) = COPY $w2
-    %2(s1) = G_TRUNC %6
-    G_BRCOND %2(s1), %bb.1
+    %2:gpr(s32) = COPY $w2
+    G_BRCOND %2, %bb.1
     G_BR %bb.2
 
   bb.1:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-redundant-zext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-redundant-zext.mir
index 547f27b2c573..524553719902 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-redundant-zext.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-redundant-zext.mir
@@ -163,8 +163,7 @@ body:             |
     %copy1:gpr(s32) = COPY $w0
     %copy2:gpr(s32) = COPY $w1
     %cond_wide:gpr(s32) = COPY $w2
-    %cond:gpr(s1) = G_TRUNC %cond_wide(s32)
-    G_BRCOND %cond(s1), %bb.1
+    G_BRCOND %cond_wide, %bb.1
     G_BR %bb.2
 
   bb.1:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-saddo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-saddo.mir
index 70544b7d3faf..8742c7c68458 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-saddo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-saddo.mir
@@ -19,17 +19,15 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
-    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
-    ; CHECK-NEXT: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
-    ; CHECK-NEXT: $w0 = COPY [[UBFMWri1]]
-    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $w0 = COPY [[ADDSWrr]]
+    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
-    %3:gpr(s32), %4:gpr(s1) = G_SADDO %0, %1
-    %5:gpr(s8) = G_ZEXT %4(s1)
-    %6:gpr(s32) = G_ZEXT %5(s8)
-    $w0 = COPY %6(s32)
-    RET_ReallyLR implicit $w0
+    %3:gpr(s32), %4:gpr(s32) = G_SADDO %0, %1
+    $w0 = COPY %3
+    $w1 = COPY %4
+    RET_ReallyLR implicit $w0, implicit $w1
 
 ...
 ---
@@ -49,17 +47,15 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
-    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
-    ; CHECK-NEXT: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
-    ; CHECK-NEXT: $w0 = COPY [[UBFMWri1]]
-    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $x0 = COPY [[ADDSXrr]]
+    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0, implicit $w1
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
-    %3:gpr(s64), %4:gpr(s1) = G_SADDO %0, %1
-    %5:gpr(s8) = G_ZEXT %4(s1)
-    %6:gpr(s32) = G_ZEXT %5(s8)
-    $w0 = COPY %6(s32)
-    RET_ReallyLR implicit $w0
+    %3:gpr(s64), %4:gpr(s32) = G_SADDO %0, %1
+    $x0 = COPY %3
+    $w1 = COPY %4
+    RET_ReallyLR implicit $x0, implicit $w1
 
 ...
 ---
@@ -83,7 +79,7 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:gpr(s32) = COPY $w0
     %constant:gpr(s32) = G_CONSTANT i32 16
-    %add:gpr(s32), %overflow:gpr(s1) = G_SADDO %copy, %constant
+    %add:gpr(s32), %overflow:gpr(s32) = G_SADDO %copy, %constant
     $w0 = COPY %add(s32)
     RET_ReallyLR implicit $w0
 
@@ -112,7 +108,7 @@ body:             |
     %copy2:gpr(s32) = COPY $w1
     %constant:gpr(s32) = G_CONSTANT i32 16
     %shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
-    %add:gpr(s32), %overflow:gpr(s1) = G_SADDO %copy1, %shift
+    %add:gpr(s32), %overflow:gpr(s32) = G_SADDO %copy1, %shift
     $w0 = COPY %add(s32)
     RET_ReallyLR implicit $w0
 
@@ -138,7 +134,7 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:gpr(s32) = COPY $w0
     %constant:gpr(s32) = G_CONSTANT i32 -16
-    %add:gpr(s32), %overflow:gpr(s1) = G_SADDO %copy, %constant
+    %add:gpr(s32), %overflow:gpr(s32) = G_SADDO %copy, %constant
     $w0 = COPY %add(s32)
     RET_ReallyLR implicit $w0
 
@@ -167,6 +163,6 @@ body:             |
     %ext:gpr(s64) = G_ZEXT %reg1(s32)
     %cst:gpr(s64) = G_CONSTANT i64 2
     %shift:gpr(s64) = G_SHL %ext, %cst(s64)
-    %add:gpr(s64), %flags:gpr(s1) = G_SADDO %reg0, %shift
+    %add:gpr(s64), %flags:gpr(s32) = G_SADDO %reg0, %shift
     $x0 = COPY %add(s64)
     RET_ReallyLR implicit $x0

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-select.mir
index 062ad2527b82..617f209e24b0 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-select.mir
@@ -19,18 +19,14 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr32 = COPY $s0
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr32 = COPY $s1
-    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fpr32 = COPY [[COPY]]
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr32 = COPY [[COPY3]]
-    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY4]], 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
     ; CHECK-NEXT: [[FCSELSrrr:%[0-9]+]]:fpr32 = FCSELSrrr [[COPY1]], [[COPY2]], 1, implicit $nzcv
     ; CHECK-NEXT: $s0 = COPY [[FCSELSrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $s0
-    %3:gpr(s32) = COPY $w0
-    %0:gpr(s1) = G_TRUNC %3(s32)
+    %0:gpr(s32) = COPY $w0
     %1:fpr(s32) = COPY $s0
     %2:fpr(s32) = COPY $s1
-    %5:fpr(s1) = COPY %0(s1)
-    %4:fpr(s32) = G_SELECT %5(s1), %1, %2
+    %4:fpr(s32) = G_SELECT %0, %1, %2
     $s0 = COPY %4(s32)
     RET_ReallyLR implicit $s0
 
@@ -52,18 +48,14 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fpr64 = COPY $d0
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fpr64 = COPY $d1
-    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fpr32 = COPY [[COPY]]
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr32 = COPY [[COPY3]]
-    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY4]], 0, implicit-def $nzcv
+    ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri [[COPY]], 0, implicit-def $nzcv
     ; CHECK-NEXT: [[FCSELDrrr:%[0-9]+]]:fpr64 = FCSELDrrr [[COPY1]], [[COPY2]], 1, implicit $nzcv
     ; CHECK-NEXT: $d0 = COPY [[FCSELDrrr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $d0
-    %3:gpr(s32) = COPY $w0
-    %0:gpr(s1) = G_TRUNC %3(s32)
+    %0:gpr(s32) = COPY $w0
     %1:fpr(s64) = COPY $d0
     %2:fpr(s64) = COPY $d1
-    %5:fpr(s1) = COPY %0(s1)
-    %4:fpr(s64) = G_SELECT %5(s1), %1, %2
+    %4:fpr(s64) = G_SELECT %0, %1, %2
     $d0 = COPY %4(s64)
     RET_ReallyLR implicit $d0
 ...
@@ -89,10 +81,9 @@ body:             |
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
     %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1
-    %cond:gpr(s1) = G_TRUNC %cmp(s32)
     %t:gpr(s32) = COPY $w2
     %f:gpr(s32) = COPY $w3
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %f
+    %select:gpr(s32) = G_SELECT %cmp, %t, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 ...
@@ -118,10 +109,9 @@ body:             |
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
     %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1
-    %cond:gpr(s1) = G_TRUNC %cmp(s32)
     %t:gpr(s32) = G_CONSTANT i32 0
     %f:gpr(s32) = G_CONSTANT i32 1
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %f
+    %select:gpr(s32) = G_SELECT %cmp, %t, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 ...
@@ -147,10 +137,9 @@ body:             |
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
     %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1
-    %cond:gpr(s1) = G_TRUNC %cmp(s32)
     %t:gpr(s32) = G_CONSTANT i32 0
     %f:gpr(s32) = G_CONSTANT i32 -1
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %f
+    %select:gpr(s32) = G_SELECT %cmp, %t, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 ...
@@ -177,10 +166,9 @@ body:             |
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
     %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1
-    %cond:gpr(s1) = G_TRUNC %cmp(s32)
     %t:gpr(s32) = G_CONSTANT i32 1
     %f:gpr(s32) = COPY $w2
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %f
+    %select:gpr(s32) = G_SELECT %cmp, %t, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 ...
@@ -207,10 +195,9 @@ body:             |
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
     %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1
-    %cond:gpr(s1) = G_TRUNC %cmp(s32)
     %t:gpr(s32) = G_CONSTANT i32 -1
     %f:gpr(s32) = COPY $w2
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %f
+    %select:gpr(s32) = G_SELECT %cmp, %t, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 ...
@@ -237,10 +224,9 @@ body:             |
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
     %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1
-    %cond:gpr(s1) = G_TRUNC %cmp(s32)
     %t:gpr(s32) = COPY $w2
     %f:gpr(s32) = G_CONSTANT i32 1
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %f
+    %select:gpr(s32) = G_SELECT %cmp, %t, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 ...
@@ -267,10 +253,9 @@ body:             |
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
     %cmp:gpr(s32) = G_ICMP intpred(ne), %reg0(s32), %reg1
-    %cond:gpr(s1) = G_TRUNC %cmp(s32)
     %t:gpr(s32) = COPY $w2
     %f:gpr(s32) = G_CONSTANT i32 -1
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %f
+    %select:gpr(s32) = G_SELECT %cmp, %t, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 ...
@@ -294,10 +279,9 @@ body:             |
     ; CHECK-NEXT: $w0 = COPY %select
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s32) = COPY $w0
-    %cond:gpr(s1) = G_TRUNC %reg0(s32)
     %t:gpr(s32) = G_CONSTANT i32 1
     %f:gpr(s32) = COPY $w1
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %f
+    %select:gpr(s32) = G_SELECT %reg0, %t, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 
@@ -322,10 +306,9 @@ body:             |
     ; CHECK-NEXT: $w0 = COPY %select
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s32) = COPY $w0
-    %cond:gpr(s1) = G_TRUNC %reg0(s32)
     %t:gpr(s32) = COPY $w1
     %f:gpr(s32) = G_CONSTANT i32 1
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %f
+    %select:gpr(s32) = G_SELECT %reg0, %t, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 
@@ -343,18 +326,18 @@ body:             |
     ; CHECK-LABEL: name: csinc_t_1_no_cmp_s64
     ; CHECK: liveins: $x0, $x1
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %reg0:gpr64 = COPY $x0
-    ; CHECK-NEXT: %cond:gpr32 = COPY %reg0.sub_32
+    ; CHECK-NEXT: %reg0:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %cond:gpr32common = COPY %reg0.sub_32
     ; CHECK-NEXT: %f:gpr64 = COPY $x1
     ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri %cond, 0, implicit-def $nzcv
     ; CHECK-NEXT: %select:gpr64 = CSINCXr %f, $xzr, 0, implicit $nzcv
     ; CHECK-NEXT: $x0 = COPY %select
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %reg0:gpr(s64) = COPY $x0
-    %cond:gpr(s1) = G_TRUNC %reg0(s64)
+    %cond:gpr(s32) = G_TRUNC %reg0(s64)
     %t:gpr(s64) = G_CONSTANT i64 1
     %f:gpr(s64) = COPY $x1
-    %select:gpr(s64) = G_SELECT %cond(s1), %t, %f
+    %select:gpr(s64) = G_SELECT %cond, %t, %f
     $x0 = COPY %select(s64)
     RET_ReallyLR implicit $x0
 
@@ -380,12 +363,11 @@ body:             |
     ; CHECK-NEXT: $w0 = COPY %select
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s32) = COPY $w0
-    %cond:gpr(s1) = G_TRUNC %reg0(s32)
     %reg1:gpr(s32) = COPY $w1
     %t:gpr(s32) = COPY $w2
     %zero:gpr(s32) = G_CONSTANT i32 0
     %sub:gpr(s32) = G_SUB %zero(s32), %reg1
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %sub
+    %select:gpr(s32) = G_SELECT %reg0, %t, %sub
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 
@@ -411,12 +393,11 @@ body:             |
     ; CHECK-NEXT: $w0 = COPY %select
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s32) = COPY $w0
-    %cond:gpr(s1) = G_TRUNC %reg0(s32)
     %reg1:gpr(s32) = COPY $w1
     %f:gpr(s32) = COPY $w2
     %zero:gpr(s32) = G_CONSTANT i32 0
     %sub:gpr(s32) = G_SUB %zero(s32), %reg1
-    %select:gpr(s32) = G_SELECT %cond(s1), %sub, %f
+    %select:gpr(s32) = G_SELECT %reg0, %sub, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 
@@ -434,8 +415,8 @@ body:             |
     ; CHECK-LABEL: name: csneg_s64
     ; CHECK: liveins: $x0, $x1, $x2
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %reg0:gpr64 = COPY $x0
-    ; CHECK-NEXT: %cond:gpr32 = COPY %reg0.sub_32
+    ; CHECK-NEXT: %reg0:gpr64sp = COPY $x0
+    ; CHECK-NEXT: %cond:gpr32common = COPY %reg0.sub_32
     ; CHECK-NEXT: %reg1:gpr64 = COPY $x1
     ; CHECK-NEXT: %t:gpr64 = COPY $x2
     ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri %cond, 0, implicit-def $nzcv
@@ -443,12 +424,12 @@ body:             |
     ; CHECK-NEXT: $x0 = COPY %select
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %reg0:gpr(s64) = COPY $x0
-    %cond:gpr(s1) = G_TRUNC %reg0(s64)
+    %cond:gpr(s32) = G_TRUNC %reg0(s64)
     %reg1:gpr(s64) = COPY $x1
     %t:gpr(s64) = COPY $x2
     %zero:gpr(s64) = G_CONSTANT i64 0
     %sub:gpr(s64) = G_SUB %zero(s64), %reg1
-    %select:gpr(s64) = G_SELECT %cond(s1), %t, %sub
+    %select:gpr(s64) = G_SELECT %cond, %t, %sub
     $x0 = COPY %select(s64)
     RET_ReallyLR implicit $x0
 ...
@@ -474,13 +455,12 @@ body:             |
     ; CHECK-NEXT: $w0 = COPY %select
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s32) = COPY $w0
-    %cond:gpr(s1) = G_TRUNC %reg0(s32)
     %reg1:gpr(s32) = COPY $w1
     %t:gpr(s32) = G_CONSTANT i32 1
     %zero:gpr(s32) = G_CONSTANT i32 0
     %reg2:gpr(s32) = COPY $w2
     %sub:gpr(s32) = G_SUB %zero(s32), %reg2
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %sub
+    %select:gpr(s32) = G_SELECT %reg0, %t, %sub
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 ...
@@ -506,11 +486,10 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
-    %cond:gpr(s1) = G_TRUNC %reg0(s32)
     %t:gpr(s32) = COPY $w2
     %negative_one:gpr(s32) = G_CONSTANT i32 -1
     %xor:gpr(s32) = G_XOR %reg1(s32), %negative_one
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %xor
+    %select:gpr(s32) = G_SELECT %reg0, %t, %xor
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 
@@ -537,11 +516,10 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
-    %cond:gpr(s1) = G_TRUNC %reg0(s32)
     %f:gpr(s32) = COPY $w2
     %negative_one:gpr(s32) = G_CONSTANT i32 -1
     %xor:gpr(s32) = G_XOR %reg1(s32), %negative_one
-    %select:gpr(s32) = G_SELECT %cond(s1), %xor, %f
+    %select:gpr(s32) = G_SELECT %reg0, %xor, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 
@@ -559,9 +537,9 @@ body:             |
     ; CHECK-LABEL: name: csinv_s64
     ; CHECK: liveins: $x0, $x1, $x2
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %reg0:gpr64 = COPY $x0
+    ; CHECK-NEXT: %reg0:gpr64sp = COPY $x0
     ; CHECK-NEXT: %reg1:gpr64 = COPY $x1
-    ; CHECK-NEXT: %cond:gpr32 = COPY %reg0.sub_32
+    ; CHECK-NEXT: %cond:gpr32common = COPY %reg0.sub_32
     ; CHECK-NEXT: %t:gpr64 = COPY $x2
     ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri %cond, 0, implicit-def $nzcv
     ; CHECK-NEXT: %select:gpr64 = CSINVXr %t, %reg1, 1, implicit $nzcv
@@ -569,11 +547,11 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %reg0:gpr(s64) = COPY $x0
     %reg1:gpr(s64) = COPY $x1
-    %cond:gpr(s1) = G_TRUNC %reg0(s64)
+    %cond:gpr(s32) = G_TRUNC %reg0(s64)
     %t:gpr(s64) = COPY $x2
     %negative_one:gpr(s64) = G_CONSTANT i64 -1
     %xor:gpr(s64) = G_XOR %reg1(s64), %negative_one
-    %select:gpr(s64) = G_SELECT %cond(s1), %t, %xor
+    %select:gpr(s64) = G_SELECT %cond, %t, %xor
     $x0 = COPY %select(s64)
     RET_ReallyLR implicit $x0
 
@@ -591,9 +569,9 @@ body:             |
     ; CHECK-LABEL: name: xor_not_negative_one
     ; CHECK: liveins: $x0, $x1, $x2
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %reg0:gpr64 = COPY $x0
+    ; CHECK-NEXT: %reg0:gpr64sp = COPY $x0
     ; CHECK-NEXT: %reg1:gpr64 = COPY $x1
-    ; CHECK-NEXT: %cond:gpr32 = COPY %reg0.sub_32
+    ; CHECK-NEXT: %cond:gpr32common = COPY %reg0.sub_32
     ; CHECK-NEXT: %t:gpr64 = COPY $x2
     ; CHECK-NEXT: %negative_one:gpr32 = MOVi32imm -1
     ; CHECK-NEXT: %zext:gpr64 = SUBREG_TO_REG 0, %negative_one, %subreg.sub_32
@@ -604,12 +582,12 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %reg0:gpr(s64) = COPY $x0
     %reg1:gpr(s64) = COPY $x1
-    %cond:gpr(s1) = G_TRUNC %reg0(s64)
+    %cond:gpr(s32) = G_TRUNC %reg0(s64)
     %t:gpr(s64) = COPY $x2
     %negative_one:gpr(s32) = G_CONSTANT i32 -1
     %zext:gpr(s64) = G_ZEXT %negative_one(s32)
     %xor:gpr(s64) = G_XOR %reg1(s64), %zext
-    %select:gpr(s64) = G_SELECT %cond(s1), %t, %xor
+    %select:gpr(s64) = G_SELECT %cond(s32), %t, %xor
     $x0 = COPY %select(s64)
     RET_ReallyLR implicit $x0
 
@@ -635,11 +613,10 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
-    %cond:gpr(s1) = G_TRUNC %reg0(s32)
     %t:gpr(s32) = COPY $w2
     %one:gpr(s32) = G_CONSTANT i32 1
     %add:gpr(s32) = G_ADD %reg1(s32), %one
-    %select:gpr(s32) = G_SELECT %cond(s1), %t, %add
+    %select:gpr(s32) = G_SELECT %reg0, %t, %add
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 
@@ -665,11 +642,10 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
-    %cond:gpr(s1) = G_TRUNC %reg0(s32)
     %f:gpr(s32) = COPY $w2
     %one:gpr(s32) = G_CONSTANT i32 1
     %add:gpr(s32) = G_ADD %reg1(s32), %one
-    %select:gpr(s32) = G_SELECT %cond(s1), %add, %f
+    %select:gpr(s32) = G_SELECT %reg0, %add, %f
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0
 
@@ -687,9 +663,9 @@ body:             |
     ; CHECK-LABEL: name: csinc_ptr_add
     ; CHECK: liveins: $x0, $x1, $x2
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %reg0:gpr64 = COPY $x0
+    ; CHECK-NEXT: %reg0:gpr64sp = COPY $x0
     ; CHECK-NEXT: %reg1:gpr64 = COPY $x1
-    ; CHECK-NEXT: %cond:gpr32 = COPY %reg0.sub_32
+    ; CHECK-NEXT: %cond:gpr32common = COPY %reg0.sub_32
     ; CHECK-NEXT: %t:gpr64 = COPY $x2
     ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32 = ANDSWri %cond, 0, implicit-def $nzcv
     ; CHECK-NEXT: %select:gpr64 = CSINCXr %t, %reg1, 1, implicit $nzcv
@@ -697,11 +673,11 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %reg0:gpr(s64) = COPY $x0
     %reg1:gpr(p0) = COPY $x1
-    %cond:gpr(s1) = G_TRUNC %reg0(s64)
+    %cond:gpr(s32) = G_TRUNC %reg0(s64)
     %t:gpr(p0) = COPY $x2
     %one:gpr(s64) = G_CONSTANT i64 1
     %ptr_add:gpr(p0) = G_PTR_ADD %reg1(p0), %one
-    %select:gpr(p0) = G_SELECT %cond(s1), %t, %ptr_add
+    %select:gpr(p0) = G_SELECT %cond(s32), %t, %ptr_add
     $x0 = COPY %select(p0)
     RET_ReallyLR implicit $x0
 
@@ -728,12 +704,11 @@ body:             |
     %reg0:gpr(s32) = COPY $w0
     %reg1:gpr(s32) = COPY $w1
     %reg2:gpr(s32) = COPY $w2
-    %cond:gpr(s1) = G_TRUNC %reg0(s32)
     %f:gpr(s32) = COPY $w2
     %negative_one:gpr(s32) = G_CONSTANT i32 -1
     %xor:gpr(s32) = G_XOR %reg1(s32), %negative_one
     %zero:gpr(s32) = G_CONSTANT i32 0
     %sub:gpr(s32) = G_SUB %zero(s32), %reg2
-    %select:gpr(s32) = G_SELECT %cond(s1), %xor, %sub
+    %select:gpr(s32) = G_SELECT %reg0, %xor, %sub
     $w0 = COPY %select(s32)
     RET_ReallyLR implicit $w0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-ssubo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-ssubo.mir
index 8b7b8464b555..9055dd20d312 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-ssubo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-ssubo.mir
@@ -19,17 +19,15 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
-    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
-    ; CHECK-NEXT: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
-    ; CHECK-NEXT: $w0 = COPY [[UBFMWri1]]
-    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $w0 = COPY [[SUBSWrr]]
+    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
-    %3:gpr(s32), %4:gpr(s1) = G_SSUBO %0, %1
-    %5:gpr(s8) = G_ZEXT %4(s1)
-    %6:gpr(s32) = G_ZEXT %5(s8)
-    $w0 = COPY %6(s32)
-    RET_ReallyLR implicit $w0
+    %3:gpr(s32), %4:gpr(s32) = G_SSUBO %0, %1
+    $w0 = COPY %3
+    $w1 = COPY %4
+    RET_ReallyLR implicit $w0, implicit $w1
 
 ...
 ---
@@ -49,17 +47,15 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 7, implicit $nzcv
-    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
-    ; CHECK-NEXT: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
-    ; CHECK-NEXT: $w0 = COPY [[UBFMWri1]]
-    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $x0 = COPY [[SUBSXrr]]
+    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0, implicit $w1
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
-    %3:gpr(s64), %4:gpr(s1) = G_SSUBO %0, %1
-    %5:gpr(s8) = G_ZEXT %4(s1)
-    %6:gpr(s32) = G_ZEXT %5(s8)
-    $w0 = COPY %6(s32)
-    RET_ReallyLR implicit $w0
+    %3:gpr(s64), %4:gpr(s32) = G_SSUBO %0, %1
+    $x0 = COPY %3
+    $w1 = COPY %4
+    RET_ReallyLR implicit $x0, implicit $w1
 
 ...
 ---
@@ -83,7 +79,7 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:gpr(s32) = COPY $w0
     %constant:gpr(s32) = G_CONSTANT i32 16
-    %add:gpr(s32), %overflow:gpr(s1) = G_SSUBO %copy, %constant
+    %add:gpr(s32), %overflow:gpr(s32) = G_SSUBO %copy, %constant
     $w0 = COPY %add(s32)
     RET_ReallyLR implicit $w0
 
@@ -112,7 +108,7 @@ body:             |
     %copy2:gpr(s32) = COPY $w1
     %constant:gpr(s32) = G_CONSTANT i32 16
     %shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
-    %add:gpr(s32), %overflow:gpr(s1) = G_SSUBO %copy1, %shift
+    %add:gpr(s32), %overflow:gpr(s32) = G_SSUBO %copy1, %shift
     $w0 = COPY %add(s32)
     RET_ReallyLR implicit $w0
 
@@ -138,7 +134,7 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:gpr(s32) = COPY $w0
     %constant:gpr(s32) = G_CONSTANT i32 -16
-    %add:gpr(s32), %overflow:gpr(s1) = G_SSUBO %copy, %constant
+    %add:gpr(s32), %overflow:gpr(s32) = G_SSUBO %copy, %constant
     $w0 = COPY %add(s32)
     RET_ReallyLR implicit $w0
 
@@ -167,6 +163,6 @@ body:             |
     %ext:gpr(s64) = G_ZEXT %reg1(s32)
     %cst:gpr(s64) = G_CONSTANT i64 2
     %shift:gpr(s64) = G_SHL %ext, %cst(s64)
-    %add:gpr(s64), %flags:gpr(s1) = G_SSUBO %reg0, %shift
+    %add:gpr(s64), %flags:gpr(s32) = G_SSUBO %reg0, %shift
     $x0 = COPY %add(s64)
     RET_ReallyLR implicit $x0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-static.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-static.mir
index 6ec4f67238ea..2145ba308664 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-static.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-static.mir
@@ -141,21 +141,15 @@ body:             |
 
     %0(s32) = COPY $w0
     %1(s32) = G_ICMP intpred(eq), %0, %0
-    %6(s1) = G_TRUNC %1(s32)
-    %9(s32) = G_ANYEXT %6
-    $w0 = COPY %9(s32)
+    $w0 = COPY %1
 
     %2(s64) = COPY $x0
     %3(s32) = G_ICMP intpred(uge), %2, %2
-    %7(s1) = G_TRUNC %3(s32)
-    %10(s32) = G_ANYEXT %7
-    $w0 = COPY %10(s32)
+    $w0 = COPY %3
 
     %4(p0) = COPY $x0
     %5(s32) = G_ICMP intpred(ne), %4, %4
-    %8(s1) = G_TRUNC %5(s32)
-    %11(s32) = G_ANYEXT %8
-    $w0 = COPY %11(s32)
+    $w0 = COPY %5
 ...
 
 ---
@@ -176,10 +170,6 @@ registers:
   - { id: 1, class: gpr }
   - { id: 2, class: fpr }
   - { id: 3, class: gpr }
-  - { id: 4, class: gpr }
-  - { id: 5, class: gpr }
-  - { id: 6, class: gpr }
-  - { id: 7, class: gpr }
 
 # CHECK:  body:
 # CHECK:    nofpexcept FCMPSrr %0, %0, implicit-def $nzcv
@@ -196,15 +186,11 @@ body:             |
 
     %0(s32) = COPY $s0
     %1(s32) = G_FCMP floatpred(one), %0, %0
-    %4(s1) = G_TRUNC %1(s32)
-    %6(s32) = G_ANYEXT %4
-    $w0 = COPY %6(s32)
+    $w0 = COPY %1
 
     %2(s64) = COPY $d0
     %3(s32) = G_FCMP floatpred(uge), %2, %2
-    %5(s1) = G_TRUNC %3(s32)
-    %7(s32) = G_ANYEXT %5
-    $w0 = COPY %7(s32)
+    $w0 = COPY %3
 
 ...
 
@@ -235,12 +221,11 @@ body:             |
     successors: %bb.1
     %0(s32) = COPY $s0
     %3:gpr(s32) = COPY $w0
-    %1(s1) = G_TRUNC %3
 
   bb.1:
     successors: %bb.1, %bb.2
     %2(s32) = PHI %0, %bb.0, %2, %bb.1
-    G_BRCOND %1, %bb.1
+    G_BRCOND %3, %bb.1
 
   bb.2:
     $s0 = COPY %2
@@ -278,17 +263,16 @@ registers:
   - { id: 9, class: gpr }
 
 # CHECK:  body:
-# CHECK:      ANDSWri %10, 0, implicit-def $nzcv
+# CHECK:      ANDSWri %0, 0, implicit-def $nzcv
 # CHECK:      %3:gpr32 = CSELWr %1, %2, 1, implicit $nzcv
-# CHECK:      ANDSWri %10, 0, implicit-def $nzcv
+# CHECK:      ANDSWri %0, 0, implicit-def $nzcv
 # CHECK:      %6:gpr64 = CSELXr %4, %5, 1, implicit $nzcv
-# CHECK:      ANDSWri %10, 0, implicit-def $nzcv
+# CHECK:      ANDSWri %0, 0, implicit-def $nzcv
 # CHECK:      %9:gpr64 = CSELXr %7, %8, 1, implicit $nzcv
 body:             |
   bb.0:
     liveins: $w0, $w1, $w2
-    %10:gpr(s32) = COPY $w0
-    %0(s1) = G_TRUNC %10
+    %0:gpr(s32) = COPY $w0
 
     %1(s32) = COPY $w1
     %2(s32) = COPY $w2

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-trunc.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
index f7a7f8ede77e..374b5a109d8d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-trunc.mir
@@ -6,7 +6,7 @@
 
   define void @trunc_s32_s64() { ret void }
   define void @trunc_s8_s64() { ret void }
-  define void @trunc_s1_s32() { ret void }
+  define void @trunc_s8_s32() { ret void }
   define void @trunc_s64_s128() { ret void }
   define void @trunc_s32_s128() { ret void }
 ...
@@ -58,7 +58,7 @@ body:             |
 ...
 
 ---
-name:            trunc_s1_s32
+name:            trunc_s8_s32
 legalized:       true
 regBankSelected: true
 
@@ -70,12 +70,12 @@ body:             |
   bb.0:
     liveins: $w0
 
-    ; CHECK-LABEL: name: trunc_s1_s32
+    ; CHECK-LABEL: name: trunc_s8_s32
     ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32all = COPY [[COPY]]
     ; CHECK-NEXT: $w0 = COPY [[COPY1]]
     %0(s32) = COPY $w0
-    %1(s1) = G_TRUNC %0
+    %1(s8) = G_TRUNC %0
     %2:gpr(s32) = G_ANYEXT %1
     $w0 = COPY %2(s32)
 ...

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-truncstore-atomic.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-truncstore-atomic.mir
index 8b5b33bf7d54..b207c851c8ba 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-truncstore-atomic.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-truncstore-atomic.mir
@@ -36,10 +36,7 @@ body:             |
 
     %0:gpr(p0) = COPY $x0
     %3:gpr(s32) = COPY $w1
-    %2:gpr(s8) = G_TRUNC %3(s32)
-    %4:gpr(s8) = G_ASSERT_ZEXT %2, 1
-    %1:gpr(s1) = G_TRUNC %4(s8)
-    G_BRCOND %1(s1), %bb.3
+    G_BRCOND %3, %bb.3
     G_BR %bb.2
 
   bb.2:
@@ -85,10 +82,7 @@ body:             |
 
     %0:gpr(p0) = COPY $x0
     %3:gpr(s32) = COPY $w1
-    %2:gpr(s8) = G_TRUNC %3(s32)
-    %4:gpr(s8) = G_ASSERT_ZEXT %2, 1
-    %1:gpr(s1) = G_TRUNC %4(s8)
-    G_BRCOND %1(s1), %bb.3
+    G_BRCOND %3, %bb.3
     G_BR %bb.2
 
   bb.2:
@@ -134,10 +128,7 @@ body:             |
 
     %0:gpr(p0) = COPY $x0
     %3:gpr(s32) = COPY $w1
-    %2:gpr(s8) = G_TRUNC %3(s32)
-    %4:gpr(s8) = G_ASSERT_ZEXT %2, 1
-    %1:gpr(s1) = G_TRUNC %4(s8)
-    G_BRCOND %1(s1), %bb.3
+    G_BRCOND %3, %bb.3
     G_BR %bb.2
 
   bb.2:

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-uaddo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-uaddo.mir
index bddf0ccfb22d..c325eb809eca 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-uaddo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-uaddo.mir
@@ -19,17 +19,15 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: [[ADDSWrr:%[0-9]+]]:gpr32 = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
-    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
-    ; CHECK-NEXT: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
-    ; CHECK-NEXT: $w0 = COPY [[UBFMWri1]]
-    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $w0 = COPY [[ADDSWrr]]
+    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
-    %3:gpr(s32), %4:gpr(s1) = G_UADDO %0, %1
-    %5:gpr(s8) = G_ZEXT %4(s1)
-    %6:gpr(s32) = G_ZEXT %5(s8)
-    $w0 = COPY %6(s32)
-    RET_ReallyLR implicit $w0
+    %3:gpr(s32), %4:gpr(s32) = G_UADDO %0, %1
+    $w0 = COPY %3
+    $w1 = COPY %4
+    RET_ReallyLR implicit $w0, implicit $w1
 
 ...
 ---
@@ -49,17 +47,15 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 3, implicit $nzcv
-    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
-    ; CHECK-NEXT: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
-    ; CHECK-NEXT: $w0 = COPY [[UBFMWri1]]
-    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $x0 = COPY [[ADDSXrr]]
+    ; CHECK-NEXT: $w1 = COPY [[CSINCWr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0, implicit $w1
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
-    %3:gpr(s64), %4:gpr(s1) = G_UADDO %0, %1
-    %5:gpr(s8) = G_ZEXT %4(s1)
-    %6:gpr(s32) = G_ZEXT %5(s8)
-    $w0 = COPY %6(s32)
-    RET_ReallyLR implicit $w0
+    %3:gpr(s64), %4:gpr(s32) = G_UADDO %0, %1
+    $x0 = COPY %3
+    $w1 = COPY %4
+    RET_ReallyLR implicit $x0, implicit $w1
 
 ...
 ---
@@ -83,7 +79,7 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:gpr(s32) = COPY $w0
     %constant:gpr(s32) = G_CONSTANT i32 16
-    %add:gpr(s32), %overflow:gpr(s1) = G_UADDO %copy, %constant
+    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy, %constant
     $w0 = COPY %add(s32)
     RET_ReallyLR implicit $w0
 
@@ -112,7 +108,7 @@ body:             |
     %copy2:gpr(s32) = COPY $w1
     %constant:gpr(s32) = G_CONSTANT i32 16
     %shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
-    %add:gpr(s32), %overflow:gpr(s1) = G_UADDO %copy1, %shift
+    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy1, %shift
     $w0 = COPY %add(s32)
     RET_ReallyLR implicit $w0
 
@@ -138,7 +134,7 @@ body:             |
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %copy:gpr(s32) = COPY $w0
     %constant:gpr(s32) = G_CONSTANT i32 -16
-    %add:gpr(s32), %overflow:gpr(s1) = G_UADDO %copy, %constant
+    %add:gpr(s32), %overflow:gpr(s32) = G_UADDO %copy, %constant
     $w0 = COPY %add(s32)
     RET_ReallyLR implicit $w0
 
@@ -167,6 +163,6 @@ body:             |
     %ext:gpr(s64) = G_ZEXT %reg1(s32)
     %cst:gpr(s64) = G_CONSTANT i64 2
     %shift:gpr(s64) = G_SHL %ext, %cst(s64)
-    %add:gpr(s64), %flags:gpr(s1) = G_UADDO %reg0, %shift
+    %add:gpr(s64), %flags:gpr(s32) = G_UADDO %reg0, %shift
     $x0 = COPY %add(s64)
     RET_ReallyLR implicit $x0

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-usubo.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-usubo.mir
index 3ad2231eac75..45e02b40812d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-usubo.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-usubo.mir
@@ -19,16 +19,12 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1
     ; CHECK-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[COPY1]], implicit-def $nzcv
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
-    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
-    ; CHECK-NEXT: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
-    ; CHECK-NEXT: $w0 = COPY [[UBFMWri1]]
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s32) = COPY $w0
     %1:gpr(s32) = COPY $w1
-    %3:gpr(s32), %4:gpr(s1) = G_USUBO %0, %1
-    %5:gpr(s8) = G_ZEXT %4(s1)
-    %6:gpr(s32) = G_ZEXT %5(s8)
-    $w0 = COPY %6(s32)
+    %3:gpr(s32), %4:gpr(s32) = G_USUBO %0, %1
+    $w0 = COPY %4(s32)
     RET_ReallyLR implicit $w0
 
 ...
@@ -49,16 +45,12 @@ body:             |
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
     ; CHECK-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[COPY1]], implicit-def $nzcv
     ; CHECK-NEXT: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
-    ; CHECK-NEXT: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[CSINCWr]], 0, 0
-    ; CHECK-NEXT: [[UBFMWri1:%[0-9]+]]:gpr32 = UBFMWri [[UBFMWri]], 0, 7
-    ; CHECK-NEXT: $w0 = COPY [[UBFMWri1]]
+    ; CHECK-NEXT: $w0 = COPY [[CSINCWr]]
     ; CHECK-NEXT: RET_ReallyLR implicit $w0
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = COPY $x1
-    %3:gpr(s64), %4:gpr(s1) = G_USUBO %0, %1
-    %5:gpr(s8) = G_ZEXT %4(s1)
-    %6:gpr(s32) = G_ZEXT %5(s8)
-    $w0 = COPY %6(s32)
+    %3:gpr(s64), %4:gpr(s32) = G_USUBO %0, %1
+    $w0 = COPY %4
     RET_ReallyLR implicit $w0
 
 ...
@@ -80,12 +72,14 @@ body:             |
     ; CHECK-NEXT: %add:gpr32 = SUBSWri %copy, 16, 0, implicit-def $nzcv
     ; CHECK-NEXT: %overflow:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
     ; CHECK-NEXT: $w0 = COPY %add
-    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $w1 = COPY %overflow
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
     %copy:gpr(s32) = COPY $w0
     %constant:gpr(s32) = G_CONSTANT i32 16
-    %add:gpr(s32), %overflow:gpr(s1) = G_USUBO %copy, %constant
-    $w0 = COPY %add(s32)
-    RET_ReallyLR implicit $w0
+    %add:gpr(s32), %overflow:gpr(s32) = G_USUBO %copy, %constant
+    $w0 = COPY %add
+    $w1 = COPY %overflow
+    RET_ReallyLR implicit $w0, implicit $w1
 
 ...
 ---
@@ -107,14 +101,16 @@ body:             |
     ; CHECK-NEXT: %add:gpr32 = SUBSWrs %copy1, %copy2, 16, implicit-def $nzcv
     ; CHECK-NEXT: %overflow:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
     ; CHECK-NEXT: $w0 = COPY %add
-    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $w1 = COPY %overflow
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
     %copy1:gpr(s32) = COPY $w0
     %copy2:gpr(s32) = COPY $w1
     %constant:gpr(s32) = G_CONSTANT i32 16
     %shift:gpr(s32) = G_SHL %copy2(s32), %constant(s32)
-    %add:gpr(s32), %overflow:gpr(s1) = G_USUBO %copy1, %shift
+    %add:gpr(s32), %overflow:gpr(s32) = G_USUBO %copy1, %shift
     $w0 = COPY %add(s32)
-    RET_ReallyLR implicit $w0
+    $w1 = COPY %overflow(s32)
+    RET_ReallyLR implicit $w0, implicit $w1
 
 ...
 ---
@@ -135,12 +131,14 @@ body:             |
     ; CHECK-NEXT: %add:gpr32 = ADDSWri %copy, 16, 0, implicit-def $nzcv
     ; CHECK-NEXT: %overflow:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
     ; CHECK-NEXT: $w0 = COPY %add
-    ; CHECK-NEXT: RET_ReallyLR implicit $w0
+    ; CHECK-NEXT: $w1 = COPY %overflow
+    ; CHECK-NEXT: RET_ReallyLR implicit $w0, implicit $w1
     %copy:gpr(s32) = COPY $w0
     %constant:gpr(s32) = G_CONSTANT i32 -16
-    %add:gpr(s32), %overflow:gpr(s1) = G_USUBO %copy, %constant
+    %add:gpr(s32), %overflow:gpr(s32) = G_USUBO %copy, %constant
     $w0 = COPY %add(s32)
-    RET_ReallyLR implicit $w0
+    $w1 = COPY %overflow(s32)
+    RET_ReallyLR implicit $w0, implicit $w1
 
 ...
 ---
@@ -161,12 +159,14 @@ body:             |
     ; CHECK-NEXT: %add:gpr64 = SUBSXrx %reg0, %reg1, 18, implicit-def $nzcv
     ; CHECK-NEXT: %flags:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
     ; CHECK-NEXT: $x0 = COPY %add
-    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: $w1 = COPY %flags
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0, implicit $w1
     %reg0:gpr(s64) = COPY $x0
     %reg1:gpr(s32) = COPY $w0
     %ext:gpr(s64) = G_ZEXT %reg1(s32)
     %cst:gpr(s64) = G_CONSTANT i64 2
     %shift:gpr(s64) = G_SHL %ext, %cst(s64)
-    %add:gpr(s64), %flags:gpr(s1) = G_USUBO %reg0, %shift
+    %add:gpr(s64), %flags:gpr(s32) = G_USUBO %reg0, %shift
     $x0 = COPY %add(s64)
-    RET_ReallyLR implicit $x0
+    $w1 = COPY %flags
+    RET_ReallyLR implicit $x0, implicit $w1

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
index c87ced71f03d..60cddbf794bc 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select.mir
@@ -144,19 +144,19 @@ body:             |
 
     %0(s32) = COPY $w0
     %1(s32) = G_ICMP intpred(eq), %0, %0
-    %6(s1) = G_TRUNC %1(s32)
+    %6(s8) = G_TRUNC %1(s32)
     %9(s32) = G_ANYEXT %6
     $w0 = COPY %9(s32)
 
     %2(s64) = COPY $x0
     %3(s32) = G_ICMP intpred(uge), %2, %2
-    %7(s1) = G_TRUNC %3(s32)
+    %7(s8) = G_TRUNC %3(s32)
     %10(s32) = G_ANYEXT %7
     $w0 = COPY %10(s32)
 
     %4(p0) = COPY $x0
     %5(s32) = G_ICMP intpred(ne), %4, %4
-    %8(s1) = G_TRUNC %5(s32)
+    %8(s8) = G_TRUNC %5(s32)
     %11(s32) = G_ANYEXT %8
     $w0 = COPY %11(s32)
 ...
@@ -199,13 +199,13 @@ body:             |
 
     %0(s32) = COPY $s0
     %1(s32) = G_FCMP floatpred(one), %0, %0
-    %4(s1) = G_TRUNC %1(s32)
+    %4(s8) = G_TRUNC %1(s32)
     %6(s32) = G_ANYEXT %4
     $w0 = COPY %6(s32)
 
     %2(s64) = COPY $d0
     %3(s32) = G_FCMP floatpred(uge), %2, %2
-    %5(s1) = G_TRUNC %3(s32)
+    %5(s8) = G_TRUNC %3(s32)
     %7(s32) = G_ANYEXT %5
     $w0 = COPY %7(s32)
 
@@ -238,12 +238,11 @@ body:             |
     successors: %bb.1
     %0(s32) = COPY $s0
     %3:gpr(s32) = COPY $w0
-    %1(s1) = G_TRUNC %3
 
   bb.1:
     successors: %bb.1, %bb.2
     %2(s32) = PHI %0, %bb.0, %2, %bb.1
-    G_BRCOND %1, %bb.1
+    G_BRCOND %3, %bb.1
 
   bb.2:
     $s0 = COPY %2
@@ -258,7 +257,7 @@ regBankSelected: true
 tracksRegLiveness: true
 
 # CHECK:      registers:
-# CHECK-NEXT:  - { id: 0, class: gpr32, preferred-register: '' }
+# CHECK-NEXT:  - { id: 0, class: gpr, preferred-register: '' }
 # CHECK-NEXT:  - { id: 1, class: gpr32, preferred-register: '' }
 # CHECK-NEXT:  - { id: 2, class: gpr32, preferred-register: '' }
 # CHECK-NEXT:  - { id: 3, class: gpr32, preferred-register: '' }
@@ -268,6 +267,7 @@ tracksRegLiveness: true
 # CHECK-NEXT:  - { id: 7, class: gpr64, preferred-register: '' }
 # CHECK-NEXT:  - { id: 8, class: gpr64, preferred-register: '' }
 # CHECK-NEXT:  - { id: 9, class: gpr64, preferred-register: '' }
+# CHECK-NEXT:  - { id: 10, class: gpr32, preferred-register: '' }
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: gpr }
@@ -291,20 +291,19 @@ body:             |
   bb.0:
     liveins: $w0, $w1, $w2
     %10:gpr(s32) = COPY $w0
-    %0(s1) = G_TRUNC %10
 
     %1(s32) = COPY $w1
     %2(s32) = COPY $w2
-    %3(s32) = G_SELECT %0, %1, %2
+    %3(s32) = G_SELECT %10, %1, %2
     $w0 = COPY %3(s32)
 
     %4(s64) = COPY $x0
     %5(s64) = COPY $x1
-    %6(s64) = G_SELECT %0, %4, %5
+    %6(s64) = G_SELECT %10, %4, %5
     $x0 = COPY %6(s64)
 
     %7(p0) = COPY $x0
     %8(p0) = COPY $x1
-    %9(p0) = G_SELECT %0, %7, %8
+    %9(p0) = G_SELECT %10, %7, %8
     $x0 = COPY %9(p0)
 ...

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/speculative-hardening-brcond.mir b/llvm/test/CodeGen/AArch64/GlobalISel/speculative-hardening-brcond.mir
index 9393f2bb81cd..3f5045f3cb89 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/speculative-hardening-brcond.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/speculative-hardening-brcond.mir
@@ -29,8 +29,7 @@ body:             |
     liveins: $w0
     successors: %bb.0, %bb.1
     %reg:gpr(s32) = COPY $w0
-    %cond:gpr(s1) = G_TRUNC %reg
-    G_BRCOND %cond(s1), %bb.1
+    G_BRCOND %reg, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -55,8 +54,7 @@ body:             |
     %reg:gpr(s32) = COPY $w0
     %zero:gpr(s32) = G_CONSTANT i32 0
     %cmp:gpr(s32) = G_ICMP intpred(eq), %reg, %zero
-    %cond:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cond(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -82,8 +80,7 @@ body:             |
     %reg0:fpr(s32) = COPY $s0
     %reg1:fpr(s32) = COPY $s1
     %cmp:gpr(s32) = G_FCMP floatpred(oeq), %reg0, %reg1
-    %cond:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cond(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/subreg-copy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/subreg-copy.mir
index 3c99623d57e5..e7167207da55 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/subreg-copy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/subreg-copy.mir
@@ -28,8 +28,7 @@ body: |
     %8:gpr(s64) = COPY %2:fpr(s64)
     %4:gpr(s64) = G_AND %8:gpr, %3:gpr
     %7:gpr(s32) = G_ICMP intpred(ne), %4:gpr(s64), %5:gpr
-    %6:gpr(s1) = G_TRUNC %7:gpr(s32)
-    G_BRCOND %6:gpr(s1), %bb.1
+    G_BRCOND %7, %bb.1
 
   bb.1:
     RET_ReallyLR
@@ -62,7 +61,6 @@ body: |
     %8:gpr(s64) = COPY %2:fpr(s64)
     %4:gpr(s64) = G_AND %8:gpr, %3:gpr
     %7:gpr(s32) = G_ICMP intpred(ne), %4:gpr(s64), %5:gpr
-    %6:gpr(s1) = G_TRUNC %7:gpr(s32)
-    G_BRCOND %6:gpr(s1), %bb.1
+    G_BRCOND %7, %bb.1
   bb.1:
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir
index 93ee6615d119..ce87e73b6d0f 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/tbnz-slt.mir
@@ -30,8 +30,7 @@ body:             |
     %copy:gpr(s64) = COPY $x0
     %zero:gpr(s64) = G_CONSTANT i64 0
     %cmp:gpr(s32) = G_ICMP intpred(slt), %copy(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -60,8 +59,7 @@ body:             |
     %copy:gpr(s32) = COPY $w0
     %zero:gpr(s32) = G_CONSTANT i32 0
     %cmp:gpr(s32) = G_ICMP intpred(slt), %copy(s32), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -91,8 +89,7 @@ body:             |
     %copy:gpr(s32) = COPY $w0
     %one:gpr(s32) = G_CONSTANT i32 1
     %cmp:gpr(s32) = G_ICMP intpred(slt), %copy(s32), %one
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -125,8 +122,7 @@ body:             |
     %c:gpr(s64) = G_CONSTANT i64 8
     %and:gpr(s64) = G_AND %copy, %bit
     %cmp:gpr(s32) = G_ICMP intpred(slt), %and(s64), %zero
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -157,8 +153,7 @@ body:             |
     %copy:gpr(s64) = COPY $x0
     %zero:gpr(s64) = G_CONSTANT i64 0
     %cmp:gpr(s32) = G_ICMP intpred(slt), %zero, %copy(s64)
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir b/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir
index 7587fd38087c..d688561dc0fd 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/tbz-sgt.mir
@@ -30,8 +30,7 @@ body:             |
     %copy:gpr(s64) = COPY $x0
     %negative_one:gpr(s64) = G_CONSTANT i64 -1
     %cmp:gpr(s32) = G_ICMP intpred(sgt), %copy(s64), %negative_one
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -60,8 +59,7 @@ body:             |
     %copy:gpr(s32) = COPY $w0
     %negative_one:gpr(s32) = G_CONSTANT i32 -1
     %cmp:gpr(s32) = G_ICMP intpred(sgt), %copy(s32), %negative_one
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -91,8 +89,7 @@ body:             |
     %copy:gpr(s32) = COPY $w0
     %one:gpr(s32) = G_CONSTANT i32 1
     %cmp:gpr(s32) = G_ICMP intpred(sgt), %copy(s32), %one
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -126,8 +123,7 @@ body:             |
     %c:gpr(s64) = G_CONSTANT i64 8
     %and:gpr(s64) = G_AND %copy, %bit
     %cmp:gpr(s32) = G_ICMP intpred(sgt), %and(s64), %negative_one
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -158,8 +154,7 @@ body:             |
     %copy:gpr(s64) = COPY $x0
     %negative_one:gpr(s64) = G_CONSTANT i64 -1
     %cmp:gpr(s32) = G_ICMP intpred(sgt), %negative_one, %copy(s64)
-    %cmp_trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %cmp_trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/widen-narrow-tbz-tbnz.mir b/llvm/test/CodeGen/AArch64/GlobalISel/widen-narrow-tbz-tbnz.mir
index d5143a93ebbf..2f8409f9fd3e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/widen-narrow-tbz-tbnz.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/widen-narrow-tbz-tbnz.mir
@@ -5,8 +5,6 @@
 # or SUBREG_TO_REG.
 --- |
  @glob = external dso_local unnamed_addr global i1, align 4
- define void @s1_no_copy() { ret void }
- define void @s16_no_copy() { ret void }
  define void @p0_no_copy() { ret void }
  define void @widen_s32_to_s64() { ret void }
  define void @widen_s16_to_s64() { ret void }
@@ -14,62 +12,6 @@
 
 ...
 ---
-name:            s1_no_copy
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  ; CHECK-LABEL: name: s1_no_copy
-  ; CHECK: bb.0:
-  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %narrow:gpr32 = IMPLICIT_DEF
-  ; CHECK-NEXT:   TBNZW %narrow, 0, %bb.1
-  ; CHECK-NEXT:   B %bb.0
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT: bb.1:
-  ; CHECK-NEXT:   RET_ReallyLR
-  bb.0:
-    successors: %bb.0, %bb.1
-    %narrow:gpr(s1) = G_IMPLICIT_DEF
-
-    ; There should be no copy here, because the s1 can be selected to a GPR32.
-    G_BRCOND %narrow(s1), %bb.1
-    G_BR %bb.0
-  bb.1:
-    RET_ReallyLR
-...
----
-name:            s16_no_copy
-alignment:       4
-legalized:       true
-regBankSelected: true
-tracksRegLiveness: true
-body:             |
-  ; CHECK-LABEL: name: s16_no_copy
-  ; CHECK: bb.0:
-  ; CHECK-NEXT:   successors: %bb.0(0x40000000), %bb.1(0x40000000)
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   %narrow:gpr32 = IMPLICIT_DEF
-  ; CHECK-NEXT:   TBNZW %narrow, 0, %bb.1
-  ; CHECK-NEXT:   B %bb.0
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT: bb.1:
-  ; CHECK-NEXT:   RET_ReallyLR
-  bb.0:
-    successors: %bb.0, %bb.1
-    %narrow:gpr(s16) = G_IMPLICIT_DEF
-    %trunc:gpr(s1) = G_TRUNC %narrow(s16)
-
-    ; Look through the G_TRUNC to get the G_IMPLICIT_DEF. We don't need a
-    ; SUBREG_TO_REG here, because the s16 will end up on a 32-bit register.
-    G_BRCOND %trunc(s1), %bb.1
-    G_BR %bb.0
-  bb.1:
-    RET_ReallyLR
-...
----
 name:            p0_no_copy
 alignment:       4
 legalized:       true
@@ -90,12 +32,11 @@ body:             |
   bb.0:
     successors: %bb.0, %bb.1
     %glob:gpr(p0) = G_GLOBAL_VALUE @glob
-    %load:gpr(s8) = G_LOAD %glob(p0) :: (dereferenceable load (s8) from @glob, align 4)
-    %trunc:gpr(s1) = G_TRUNC %load(s8)
+    %load:gpr(s32) = G_LOAD %glob(p0) :: (dereferenceable load (s8) from @glob, align 4)
 
     ; Look through G_TRUNC to get the load. The load is into a s8, which will
     ; be selected to a GPR32, so we don't need a copy.
-    G_BRCOND %trunc(s1), %bb.1
+    G_BRCOND %load, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -132,8 +73,7 @@ body:             |
 
     ; We should widen using a SUBREG_TO_REG here, because we need a TBZX to get
     ; bit 33. The subregister should be sub_32.
-    %trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -169,8 +109,7 @@ body:             |
     ; We should widen using a SUBREG_TO_REG here, because we need a TBZX to get
     ; bit 33. The subregister should be sub_32, because s16 will end up on a
     ; GPR32.
-    %trunc:gpr(s1) = G_TRUNC %cmp(s32)
-    G_BRCOND %trunc(s1), %bb.1
+    G_BRCOND %cmp, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR
@@ -201,8 +140,8 @@ body:             |
     %wide:gpr(s64) = COPY $x0
 
     ; We should narrow using a subregister copy here.
-    %trunc:gpr(s1) = G_TRUNC %wide(s64)
-    G_BRCOND %trunc(s1), %bb.1
+    %trunc:gpr(s32) = G_TRUNC %wide(s64)
+    G_BRCOND %trunc, %bb.1
     G_BR %bb.0
   bb.1:
     RET_ReallyLR

diff  --git a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
index 2a3a9fc361f6..b283fcaf13e2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-xaluo.ll
@@ -25,8 +25,7 @@ define zeroext i1 @saddo1.i32(i32 %v1, i32 %v2, i32* %res) {
 ; GISEL-LABEL: saddo1.i32:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    adds w8, w0, w1
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str w8, [x2]
 ; GISEL-NEXT:    ret
 entry:
@@ -57,8 +56,7 @@ define zeroext i1 @saddo2.i32(i32 %v1, i32* %res) {
 ; GISEL-LABEL: saddo2.i32:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    adds w8, w0, #4
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str w8, [x1]
 ; GISEL-NEXT:    ret
 entry:
@@ -89,8 +87,7 @@ define zeroext i1 @saddo3.i32(i32 %v1, i32* %res) {
 ; GISEL-LABEL: saddo3.i32:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    subs w8, w0, #4
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str w8, [x1]
 ; GISEL-NEXT:    ret
 entry:
@@ -124,8 +121,7 @@ define zeroext i1 @saddo4.i32(i32 %v1, i32* %res) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    mov w8, #16777215
 ; GISEL-NEXT:    adds w8, w0, w8
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str w8, [x1]
 ; GISEL-NEXT:    ret
 entry:
@@ -156,8 +152,7 @@ define zeroext i1 @saddo5.i32(i32 %v1, i32 %v2, i32* %res) {
 ; GISEL-LABEL: saddo5.i32:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    adds w8, w0, w1, lsl #16
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str w8, [x2]
 ; GISEL-NEXT:    ret
 entry:
@@ -188,8 +183,7 @@ define zeroext i1 @saddo1.i64(i64 %v1, i64 %v2, i64* %res) {
 ; GISEL-LABEL: saddo1.i64:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    adds x8, x0, x1
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str x8, [x2]
 ; GISEL-NEXT:    ret
 entry:
@@ -219,8 +213,7 @@ define zeroext i1 @saddo2.i64(i64 %v1, i64* %res) {
 ; GISEL-LABEL: saddo2.i64:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    adds x8, x0, #4
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str x8, [x1]
 ; GISEL-NEXT:    ret
 entry:
@@ -250,8 +243,7 @@ define zeroext i1 @saddo3.i64(i64 %v1, i64* %res) {
 ; GISEL-LABEL: saddo3.i64:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    subs x8, x0, #4
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str x8, [x1]
 ; GISEL-NEXT:    ret
 entry:
@@ -281,8 +273,7 @@ define zeroext i1 @uaddo.i32(i32 %v1, i32 %v2, i32* %res) {
 ; GISEL-LABEL: uaddo.i32:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    adds w8, w0, w1
-; GISEL-NEXT:    cset w9, hs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, hs
 ; GISEL-NEXT:    str w8, [x2]
 ; GISEL-NEXT:    ret
 entry:
@@ -312,8 +303,7 @@ define zeroext i1 @uaddo.i64(i64 %v1, i64 %v2, i64* %res) {
 ; GISEL-LABEL: uaddo.i64:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    adds x8, x0, x1
-; GISEL-NEXT:    cset w9, hs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, hs
 ; GISEL-NEXT:    str x8, [x2]
 ; GISEL-NEXT:    ret
 entry:
@@ -343,8 +333,7 @@ define zeroext i1 @ssubo1.i32(i32 %v1, i32 %v2, i32* %res) {
 ; GISEL-LABEL: ssubo1.i32:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    subs w8, w0, w1
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str w8, [x2]
 ; GISEL-NEXT:    ret
 entry:
@@ -374,8 +363,7 @@ define zeroext i1 @ssubo2.i32(i32 %v1, i32* %res) {
 ; GISEL-LABEL: ssubo2.i32:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    adds w8, w0, #4
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str w8, [x1]
 ; GISEL-NEXT:    ret
 entry:
@@ -405,8 +393,7 @@ define zeroext i1 @ssubo.i64(i64 %v1, i64 %v2, i64* %res) {
 ; GISEL-LABEL: ssubo.i64:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    subs x8, x0, x1
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str x8, [x2]
 ; GISEL-NEXT:    ret
 entry:
@@ -436,8 +423,7 @@ define zeroext i1 @usubo.i32(i32 %v1, i32 %v2, i32* %res) {
 ; GISEL-LABEL: usubo.i32:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    subs w8, w0, w1
-; GISEL-NEXT:    cset w9, lo
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, lo
 ; GISEL-NEXT:    str w8, [x2]
 ; GISEL-NEXT:    ret
 entry:
@@ -467,8 +453,7 @@ define zeroext i1 @usubo.i64(i64 %v1, i64 %v2, i64* %res) {
 ; GISEL-LABEL: usubo.i64:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    subs x8, x0, x1
-; GISEL-NEXT:    cset w9, lo
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, lo
 ; GISEL-NEXT:    str x8, [x2]
 ; GISEL-NEXT:    ret
 entry:
@@ -569,8 +554,7 @@ define zeroext i1 @smulo2.i64(i64 %v1, i64* %res) {
 ; GISEL-LABEL: smulo2.i64:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    adds x8, x0, x0
-; GISEL-NEXT:    cset w9, vs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, vs
 ; GISEL-NEXT:    str x8, [x1]
 ; GISEL-NEXT:    ret
 entry:
@@ -675,8 +659,7 @@ define zeroext i1 @umulo2.i64(i64 %v1, i64* %res) {
 ; GISEL-LABEL: umulo2.i64:
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    adds x8, x0, x0
-; GISEL-NEXT:    cset w9, hs
-; GISEL-NEXT:    ubfx w0, w9, #0, #1
+; GISEL-NEXT:    cset w0, hs
 ; GISEL-NEXT:    str x8, [x1]
 ; GISEL-NEXT:    ret
 entry:
@@ -735,8 +718,7 @@ define i1 @saddo.not.i32(i32 %v1, i32 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmn w0, w1
 ; GISEL-NEXT:    cset w8, vs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -789,8 +771,7 @@ define i1 @saddo.not.i64(i64 %v1, i64 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmn x0, x1
 ; GISEL-NEXT:    cset w8, vs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -843,8 +824,7 @@ define i1 @uaddo.not.i32(i32 %v1, i32 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmn w0, w1
 ; GISEL-NEXT:    cset w8, hs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -897,8 +877,7 @@ define i1 @uaddo.not.i64(i64 %v1, i64 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmn x0, x1
 ; GISEL-NEXT:    cset w8, hs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -951,8 +930,7 @@ define i1 @ssubo.not.i32(i32 %v1, i32 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmp w0, w1
 ; GISEL-NEXT:    cset w8, vs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -1005,8 +983,7 @@ define i1 @ssub.not.i64(i64 %v1, i64 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmp x0, x1
 ; GISEL-NEXT:    cset w8, vs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -1059,8 +1036,7 @@ define i1 @usubo.not.i32(i32 %v1, i32 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmp w0, w1
 ; GISEL-NEXT:    cset w8, lo
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -1113,8 +1089,7 @@ define i1 @usubo.not.i64(i64 %v1, i64 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmp x0, x1
 ; GISEL-NEXT:    cset w8, lo
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -2155,8 +2130,7 @@ define zeroext i1 @saddo.br.i32(i32 %v1, i32 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmn w0, w1
 ; GISEL-NEXT:    cset w8, vs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -2191,8 +2165,7 @@ define zeroext i1 @saddo.br.i64(i64 %v1, i64 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmn x0, x1
 ; GISEL-NEXT:    cset w8, vs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -2227,8 +2200,7 @@ define zeroext i1 @uaddo.br.i32(i32 %v1, i32 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmn w0, w1
 ; GISEL-NEXT:    cset w8, hs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
@@ -2263,8 +2235,7 @@ define zeroext i1 @uaddo.br.i64(i64 %v1, i64 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmn x0, x1
 ; GISEL-NEXT:    cset w8, hs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
@@ -2299,8 +2270,7 @@ define zeroext i1 @ssubo.br.i32(i32 %v1, i32 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmp w0, w1
 ; GISEL-NEXT:    cset w8, vs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -2335,8 +2305,7 @@ define zeroext i1 @ssubo.br.i64(i64 %v1, i64 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmp x0, x1
 ; GISEL-NEXT:    cset w8, vs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -2371,8 +2340,7 @@ define zeroext i1 @usubo.br.i32(i32 %v1, i32 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmp w0, w1
 ; GISEL-NEXT:    cset w8, lo
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
@@ -2407,8 +2375,7 @@ define zeroext i1 @usubo.br.i64(i64 %v1, i64 %v2) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmp x0, x1
 ; GISEL-NEXT:    cset w8, lo
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
@@ -2524,8 +2491,7 @@ define zeroext i1 @smulo2.br.i64(i64 %v1) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmn x0, x0
 ; GISEL-NEXT:    cset w8, vs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 2)
@@ -2637,8 +2603,7 @@ define zeroext i1 @umulo2.br.i64(i64 %v1) {
 ; GISEL:       // %bb.0: // %entry
 ; GISEL-NEXT:    cmn x0, x0
 ; GISEL-NEXT:    cset w8, hs
-; GISEL-NEXT:    eor w8, w8, #0x1
-; GISEL-NEXT:    and w0, w8, #0x1
+; GISEL-NEXT:    eor w0, w8, #0x1
 ; GISEL-NEXT:    ret
 entry:
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 2)


        


More information about the llvm-commits mailing list