[llvm] 0342aef - [GlobalISel][X86] Add handling of scalar G_UADDO/G_USUBO opcodes

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 16 07:15:41 PDT 2023


Author: Simon Pilgrim
Date: 2023-06-16T15:15:27+01:00
New Revision: 0342aefa32efe91780c9c39c17e8c68c2684d26f

URL: https://github.com/llvm/llvm-project/commit/0342aefa32efe91780c9c39c17e8c68c2684d26f
DIFF: https://github.com/llvm/llvm-project/commit/0342aefa32efe91780c9c39c17e8c68c2684d26f.diff

LOG: [GlobalISel][X86] Add handling of scalar G_UADDO/G_USUBO opcodes

This finally allows x86 globalisel to lower addition/subtraction of illegal types without fallback :)

Added: 
    

Modified: 
    llvm/lib/Target/X86/X86InstructionSelector.cpp
    llvm/lib/Target/X86/X86LegalizerInfo.cpp
    llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
    llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir
    llvm/test/CodeGen/X86/GlobalISel/legalize-leading-zeros.mir
    llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir
    llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros-undef.mir
    llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros.mir
    llvm/test/CodeGen/X86/GlobalISel/sub-scalar.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
index 71162950f3d2f..404a8da53a442 100644
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -92,8 +92,8 @@ class X86InstructionSelector : public InstructionSelector {
                  MachineFunction &MF) const;
   bool selectFCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
-  bool selectUAddSubE(MachineInstr &I, MachineRegisterInfo &MRI,
-                   MachineFunction &MF) const;
+  bool selectUAddSub(MachineInstr &I, MachineRegisterInfo &MRI,
+                     MachineFunction &MF) const;
   bool selectDebugInstr(MachineInstr &I, MachineRegisterInfo &MRI) const;
   bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
   bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
@@ -403,8 +403,10 @@ bool X86InstructionSelector::select(MachineInstr &I) {
   case TargetOpcode::G_FCMP:
     return selectFCmp(I, MRI, MF);
   case TargetOpcode::G_UADDE:
+  case TargetOpcode::G_UADDO:
   case TargetOpcode::G_USUBE:
-    return selectUAddSubE(I, MRI, MF);
+  case TargetOpcode::G_USUBO:
+    return selectUAddSub(I, MRI, MF);
   case TargetOpcode::G_UNMERGE_VALUES:
     return selectUnmergeValues(I, MRI, MF);
   case TargetOpcode::G_MERGE_VALUES:
@@ -1070,22 +1072,26 @@ bool X86InstructionSelector::selectFCmp(MachineInstr &I,
   return true;
 }
 
-bool X86InstructionSelector::selectUAddSubE(MachineInstr &I,
-                                         MachineRegisterInfo &MRI,
-                                         MachineFunction &MF) const {
+bool X86InstructionSelector::selectUAddSub(MachineInstr &I,
+                                           MachineRegisterInfo &MRI,
+                                           MachineFunction &MF) const {
   assert((I.getOpcode() == TargetOpcode::G_UADDE ||
-          I.getOpcode() == TargetOpcode::G_USUBE) &&
+          I.getOpcode() == TargetOpcode::G_UADDO ||
+          I.getOpcode() == TargetOpcode::G_USUBE ||
+          I.getOpcode() == TargetOpcode::G_USUBO) &&
          "unexpected instruction");
 
   const Register DstReg = I.getOperand(0).getReg();
   const Register CarryOutReg = I.getOperand(1).getReg();
   const Register Op0Reg = I.getOperand(2).getReg();
   const Register Op1Reg = I.getOperand(3).getReg();
-  Register CarryInReg = I.getOperand(4).getReg();
-  bool IsSub = I.getOpcode() == TargetOpcode::G_USUBE;
+  bool IsSub = I.getOpcode() == TargetOpcode::G_USUBE ||
+               I.getOpcode() == TargetOpcode::G_USUBO;
+  bool HasCarryIn = I.getOpcode() == TargetOpcode::G_UADDE ||
+                    I.getOpcode() == TargetOpcode::G_USUBE;
 
   const LLT DstTy = MRI.getType(DstReg);
-  assert(DstTy.isScalar() && "G_UADDE only supported for scalar types");
+  assert(DstTy.isScalar() && "selectUAddSub only supported for scalar types");
 
   // TODO: Handle immediate argument variants?
   unsigned OpADC, OpADD, OpSBB, OpSUB;
@@ -1115,38 +1121,46 @@ bool X86InstructionSelector::selectUAddSubE(MachineInstr &I,
     OpSUB = X86::SUB64rr;
     break;
   default:
-    llvm_unreachable("Can't select G_UADDE/G_USUBE, unsupported type.");
+    llvm_unreachable("selectUAddSub unsupported type.");
   }
 
   const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
   const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
 
-  // find CarryIn def instruction.
-  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
-  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
-    CarryInReg = Def->getOperand(1).getReg();
-    Def = MRI.getVRegDef(CarryInReg);
-  }
-
-  unsigned Opcode = 0;
-  if (Def->getOpcode() == I.getOpcode()) {
-    // carry set by prev ADD/SUB.
-
-    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
-        .addReg(CarryInReg);
+  unsigned Opcode = IsSub ? OpSUB : OpADD;
 
-    if (!RBI.constrainGenericRegister(CarryInReg, *DstRC, MRI))
-      return false;
+  // G_UADDE/G_USUBE - find CarryIn def instruction.
+  if (HasCarryIn) {
+    Register CarryInReg = I.getOperand(4).getReg();
+    MachineInstr *Def = MRI.getVRegDef(CarryInReg);
+    while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
+      CarryInReg = Def->getOperand(1).getReg();
+      Def = MRI.getVRegDef(CarryInReg);
+    }
 
-    Opcode = IsSub ? OpSBB : OpADC;
-  } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
-    // carry is constant, support only 0.
-    if (*val != 0)
+    // TODO - handle more CF generating instructions
+    if (Def->getOpcode() == TargetOpcode::G_UADDE ||
+        Def->getOpcode() == TargetOpcode::G_UADDO ||
+        Def->getOpcode() == TargetOpcode::G_USUBE ||
+        Def->getOpcode() == TargetOpcode::G_USUBO) {
+      // carry set by prev ADD/SUB.
+      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY),
+              X86::EFLAGS)
+          .addReg(CarryInReg);
+
+      if (!RBI.constrainGenericRegister(CarryInReg, *DstRC, MRI))
+        return false;
+
+      Opcode = IsSub ? OpSBB : OpADC;
+    } else if (auto val = getIConstantVRegVal(CarryInReg, MRI)) {
+      // carry is constant, support only 0.
+      if (*val != 0)
+        return false;
+
+      Opcode = IsSub ? OpSUB : OpADD;
+    } else
       return false;
-
-    Opcode = IsSub ? OpSUB : OpADD;
-  } else
-    return false;
+  }
 
   MachineInstr &Inst =
       *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)

diff --git a/llvm/lib/Target/X86/X86LegalizerInfo.cpp b/llvm/lib/Target/X86/X86LegalizerInfo.cpp
index 406c9d423affa..edc3950a60f62 100644
--- a/llvm/lib/Target/X86/X86LegalizerInfo.cpp
+++ b/llvm/lib/Target/X86/X86LegalizerInfo.cpp
@@ -91,8 +91,8 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
     getActionDefinitionsBuilder(Op)
         .widenScalarToNextPow2(LitTyIdx, /*Min=*/8)
         .widenScalarToNextPow2(BigTyIdx, /*Min=*/16)
-        .clampScalar(LitTyIdx, s8, Is64Bit ? s64 : s32)
-        .clampScalar(BigTyIdx, s32, Is64Bit ? s128 : s64)
+        .minScalar(LitTyIdx, s8)
+        .minScalar(BigTyIdx, s32)
         .legalIf([=](const LegalityQuery &Q) {
           switch (Q.Types[BigTyIdx].getSizeInBits()) {
           case 16:
@@ -148,8 +148,7 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
       .clampScalar(0, s8, sMaxScalar)
       .scalarize(0);
 
-  // TODO: Add G_UADDO/G_USUBO handling
-  getActionDefinitionsBuilder({G_UADDE, G_USUBE})
+  getActionDefinitionsBuilder({G_UADDE, G_UADDO, G_USUBE, G_USUBO})
       .legalIf([=](const LegalityQuery &Query) -> bool {
         return typePairInSet(0, 1, {{s8, s1}, {s16, s1}, {s32, s1}})(Query) ||
                (Is64Bit && typePairInSet(0, 1, {{s64, s1}})(Query));

diff --git a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
index 7805a3c8fbc9d..bf7a327257fcc 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
-; RUN: llc -mtriple=i386-linux-gnu   -global-isel -global-isel-abort=2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X86
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i386-linux-gnu   -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X86
 
 define i128 @test_add_i128(i128 %arg1, i128 %arg2) nounwind {
 ; X64-LABEL: test_add_i128:
 ; X64:       # %bb.0:
-; X64-NEXT:    movq %rdi, %rax
-; X64-NEXT:    addq %rdx, %rax
-; X64-NEXT:    adcq %rcx, %rsi
-; X64-NEXT:    movq %rsi, %rdx
+; X64-NEXT:    movq %rdx, %rax
+; X64-NEXT:    addq %rdi, %rax
+; X64-NEXT:    adcq %rsi, %rcx
+; X64-NEXT:    movq %rcx, %rdx
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_add_i128:
@@ -20,17 +20,17 @@ define i128 @test_add_i128(i128 %arg1, i128 %arg2) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    addl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    adcl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %ecx, 8(%eax)
-; X86-NEXT:    movl %edi, 4(%eax)
-; X86-NEXT:    movl %esi, (%eax)
-; X86-NEXT:    movl %edx, 12(%eax)
+; X86-NEXT:    adcl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %ecx, (%eax)
+; X86-NEXT:    movl %edx, 4(%eax)
+; X86-NEXT:    movl %esi, 8(%eax)
+; X86-NEXT:    movl %edi, 12(%eax)
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
-; X86-NEXT:    retl $4
+; X86-NEXT:    retl
   %ret = add i128 %arg1, %arg2
   ret i128 %ret
 }

diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir
index 224684113b9db..a595324edbbea 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-add.mir
@@ -1,15 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -O0 -mtriple=x86_64-linux-gnu -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*'  %s 2>%t -o - | FileCheck %s --check-prefixes=CHECK,X64
-# RUN: FileCheck -check-prefix=ERR64  %s < %t
-
-# RUN: llc -O0 -mtriple=i386-linux-gnu -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*'  %s 2>%t -o - | FileCheck %s --check-prefixes=CHECK,X32
-# RUN: FileCheck -check-prefix=ERR32  %s < %t
-
-# ERR64: remark: <unknown>:0:0: unable to legalize instruction: %9:_(s64), %10:_(s1) = G_UADDO %5:_, %7:_ (in function: test_add_i128)
-
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %11:_(s32), %12:_(s1) = G_UADDO %7:_, %9:_ (in function: test_add_i42)
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %7:_(s32), %8:_(s1) = G_UADDO %3:_, %5:_ (in function: test_add_i64)
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %13:_(s32), %14:_(s1) = G_UADDO %5:_, %9:_ (in function: test_add_i128)
+# RUN: llc -O0 -mtriple=x86_64-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=CHECK,X64
+# RUN: llc -O0 -mtriple=i386-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=CHECK,X32
 
 --- |
 
@@ -163,17 +154,12 @@ body:             |
     ; X64-NEXT: RET 0
     ; X32-LABEL: name: test_add_i42
     ; X32: [[COPY:%[0-9]+]]:_(s64) = COPY $rdx
-    ; X32-NEXT: [[TRUNC:%[0-9]+]]:_(s42) = G_TRUNC [[COPY]](s64)
-    ; X32-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s42)
-    ; X32-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s42)
-    ; X32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s64)
-    ; X32-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT1]](s64)
+    ; X32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; X32-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; X32-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
     ; X32-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
     ; X32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; X32-NEXT: [[TRUNC1:%[0-9]+]]:_(s42) = G_TRUNC [[MV]](s64)
-    ; X32-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s42)
-    ; X32-NEXT: $rax = COPY [[ANYEXT2]](s64)
+    ; X32-NEXT: $rax = COPY [[MV]](s64)
     ; X32-NEXT: RET 0
     %0(s64) = COPY $rdx
     %1(s42) = G_TRUNC %0(s64)
@@ -233,10 +219,8 @@ body:             |
     ; X64-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[DEF1]](s128)
     ; X64-NEXT: [[UADDO:%[0-9]+]]:_(s64), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
     ; X64-NEXT: [[UADDE:%[0-9]+]]:_(s64), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
-    ; X64-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UADDO]](s64), [[UADDE]](s64)
-    ; X64-NEXT: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV]](s128)
-    ; X64-NEXT: $rax = COPY [[UV4]](s64)
-    ; X64-NEXT: $rdx = COPY [[UV5]](s64)
+    ; X64-NEXT: $rax = COPY [[UADDO]](s64)
+    ; X64-NEXT: $rdx = COPY [[UADDE]](s64)
     ; X64-NEXT: RET 0
     ; X32-LABEL: name: test_add_i128
     ; X32: [[DEF:%[0-9]+]]:_(s128) = IMPLICIT_DEF
@@ -247,10 +231,10 @@ body:             |
     ; X32-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV5]], [[UADDO1]]
     ; X32-NEXT: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV2]], [[UV6]], [[UADDE1]]
     ; X32-NEXT: [[UADDE4:%[0-9]+]]:_(s32), [[UADDE5:%[0-9]+]]:_(s1) = G_UADDE [[UV3]], [[UV7]], [[UADDE3]]
-    ; X32-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32), [[UADDE2]](s32), [[UADDE4]](s32)
-    ; X32-NEXT: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV]](s128)
-    ; X32-NEXT: $rax = COPY [[UV8]](s64)
-    ; X32-NEXT: $rdx = COPY [[UV9]](s64)
+    ; X32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+    ; X32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDE2]](s32), [[UADDE4]](s32)
+    ; X32-NEXT: $rax = COPY [[MV]](s64)
+    ; X32-NEXT: $rdx = COPY [[MV1]](s64)
     ; X32-NEXT: RET 0
     %0(s128) = IMPLICIT_DEF
     %1(s128) = IMPLICIT_DEF

diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-leading-zeros.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-leading-zeros.mir
index 5a0b341650907..19fe5b84c73ce 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-leading-zeros.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-leading-zeros.mir
@@ -3,7 +3,6 @@
 # RUN: llc -mtriple=i386-linux-gnu -mattr=+lzcnt -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*'  %s 2>%t -o - | FileCheck %s --check-prefixes=CHECK,X86
 # RUN: FileCheck -check-prefix=ERR32  %s < %t
 
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %12:_(s32), %13:_(s1) = G_USUBO %8:_, %10:_ (in function: test_ctlz35)
 # ERR32: remark: <unknown>:0:0: unable to legalize instruction: %10:_(s64) = G_CTLZ_ZERO_UNDEF %4:_(s32) (in function: test_ctlz64)
 
 # test count leading zeros for s16, s32, and s64
@@ -30,16 +29,29 @@ body:             |
     ; X86: [[COPY:%[0-9]+]]:_(s64) = COPY $rdx
     ; X86-NEXT: [[TRUNC:%[0-9]+]]:_(s35) = G_TRUNC [[COPY]](s64)
     ; X86-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s35)
-    ; X86-NEXT: [[CTLZ:%[0-9]+]]:_(s64) = G_CTLZ [[ZEXT]](s64)
-    ; X86-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 29
-    ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CTLZ]](s64)
-    ; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
-    ; X86-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
-    ; X86-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
-    ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
-    ; X86-NEXT: [[TRUNC1:%[0-9]+]]:_(s35) = G_TRUNC [[MV]](s64)
-    ; X86-NEXT: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC1]](s35)
-    ; X86-NEXT: RET 0, implicit [[ZEXT1]](s64)
+    ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
+    ; X86-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV1]](s32), [[C]]
+    ; X86-NEXT: [[CTLZ:%[0-9]+]]:_(s64) = G_CTLZ [[UV]](s32)
+    ; X86-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
+    ; X86-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[CTLZ]], [[C1]]
+    ; X86-NEXT: [[CTLZ_ZERO_UNDEF:%[0-9]+]]:_(s64) = G_CTLZ_ZERO_UNDEF [[UV1]](s32)
+    ; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ADD]](s64)
+    ; X86-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CTLZ_ZERO_UNDEF]](s64)
+    ; X86-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
+    ; X86-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ZEXT1]](s32), [[UV2]], [[UV4]]
+    ; X86-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ZEXT1]](s32), [[UV3]], [[UV5]]
+    ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[SELECT]](s32), [[SELECT1]](s32)
+    ; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 29
+    ; X86-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C2]](s32), [[C]](s32)
+    ; X86-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+    ; X86-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV1]](s64)
+    ; X86-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV6]], [[UV8]]
+    ; X86-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV7]], [[UV9]], [[USUBO1]]
+    ; X86-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+    ; X86-NEXT: [[TRUNC1:%[0-9]+]]:_(s35) = G_TRUNC [[MV2]](s64)
+    ; X86-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC1]](s35)
+    ; X86-NEXT: RET 0, implicit [[ZEXT2]](s64)
     %0(s64) = COPY $rdx
     %1:_(s35) = G_TRUNC %0(s64)
     %2:_(s35) = G_CTLZ %1

diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir
index 1a14ab9721626..ce45ed7407fd2 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-sub.mir
@@ -1,15 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -O0 -mtriple=x86_64-linux-gnu -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*'  %s 2>%t -o - | FileCheck %s --check-prefixes=CHECK,X64
-# RUN: FileCheck -check-prefix=ERR64  %s < %t
-
-# RUN: llc -O0 -mtriple=i386-linux-gnu -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*'  %s 2>%t -o - | FileCheck %s --check-prefixes=CHECK,X32
-# RUN: FileCheck -check-prefix=ERR32  %s < %t
-
-# ERR64: remark: <unknown>:0:0: unable to legalize instruction: %9:_(s64), %10:_(s1) = G_USUBO %5:_, %7:_ (in function: test_sub_i128)
-
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %11:_(s32), %12:_(s1) = G_USUBO %7:_, %9:_ (in function: test_sub_i42)
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %7:_(s32), %8:_(s1) = G_USUBO %3:_, %5:_ (in function: test_sub_i64)
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %13:_(s32), %14:_(s1) = G_USUBO %5:_, %9:_ (in function: test_sub_i128)
+# RUN: llc -O0 -mtriple=x86_64-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=CHECK,X64
+# RUN: llc -O0 -mtriple=i386-linux-gnu -run-pass=legalizer %s -o -  | FileCheck %s --check-prefixes=CHECK,X32
 
 --- |
 
@@ -163,17 +154,12 @@ body:             |
     ; X64-NEXT: RET 0
     ; X32-LABEL: name: test_sub_i42
     ; X32: [[COPY:%[0-9]+]]:_(s64) = COPY $rdx
-    ; X32-NEXT: [[TRUNC:%[0-9]+]]:_(s42) = G_TRUNC [[COPY]](s64)
-    ; X32-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s42)
-    ; X32-NEXT: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s42)
-    ; X32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s64)
-    ; X32-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT1]](s64)
+    ; X32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; X32-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; X32-NEXT: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
     ; X32-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
     ; X32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
-    ; X32-NEXT: [[TRUNC1:%[0-9]+]]:_(s42) = G_TRUNC [[MV]](s64)
-    ; X32-NEXT: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC1]](s42)
-    ; X32-NEXT: $rax = COPY [[ANYEXT2]](s64)
+    ; X32-NEXT: $rax = COPY [[MV]](s64)
     ; X32-NEXT: RET 0
     %0(s64) = COPY $rdx
     %1(s42) = G_TRUNC %0(s64)
@@ -233,10 +219,8 @@ body:             |
     ; X64-NEXT: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[DEF1]](s128)
     ; X64-NEXT: [[USUBO:%[0-9]+]]:_(s64), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
     ; X64-NEXT: [[USUBE:%[0-9]+]]:_(s64), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
-    ; X64-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[USUBO]](s64), [[USUBE]](s64)
-    ; X64-NEXT: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV]](s128)
-    ; X64-NEXT: $rax = COPY [[UV4]](s64)
-    ; X64-NEXT: $rdx = COPY [[UV5]](s64)
+    ; X64-NEXT: $rax = COPY [[USUBO]](s64)
+    ; X64-NEXT: $rdx = COPY [[USUBE]](s64)
     ; X64-NEXT: RET 0
     ; X32-LABEL: name: test_sub_i128
     ; X32: [[DEF:%[0-9]+]]:_(s128) = IMPLICIT_DEF
@@ -247,10 +231,10 @@ body:             |
     ; X32-NEXT: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV5]], [[USUBO1]]
     ; X32-NEXT: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV2]], [[UV6]], [[USUBE1]]
     ; X32-NEXT: [[USUBE4:%[0-9]+]]:_(s32), [[USUBE5:%[0-9]+]]:_(s1) = G_USUBE [[UV3]], [[UV7]], [[USUBE3]]
-    ; X32-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32), [[USUBE2]](s32), [[USUBE4]](s32)
-    ; X32-NEXT: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV]](s128)
-    ; X32-NEXT: $rax = COPY [[UV8]](s64)
-    ; X32-NEXT: $rdx = COPY [[UV9]](s64)
+    ; X32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+    ; X32-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBE2]](s32), [[USUBE4]](s32)
+    ; X32-NEXT: $rax = COPY [[MV]](s64)
+    ; X32-NEXT: $rdx = COPY [[MV1]](s64)
     ; X32-NEXT: RET 0
     %0(s128) = IMPLICIT_DEF
     %1(s128) = IMPLICIT_DEF

diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros-undef.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros-undef.mir
index d4c3e5d147506..0594793230790 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros-undef.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros-undef.mir
@@ -1,10 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
 # RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=CHECK,X64
-# RUN: llc -mtriple=i386-linux-gnu -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*'  %s 2>%t -o - | FileCheck %s --check-prefixes=CHECK,X86
-# RUN: FileCheck -check-prefix=ERR32  %s < %t
-
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %28:_(s32), %29:_(s1) = G_UADDO %24:_, %26:_ (in function: test_cttz35)
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %23:_(s32), %24:_(s1) = G_UADDO %19:_, %21:_ (in function: test_cttz64)
+# RUN: llc -mtriple=i386-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=CHECK,X86
 
 # test count trailing zeros for s16, s32, and s64
 
@@ -27,31 +23,28 @@ body:             |
     ; X64-NEXT: RET 0, implicit [[AND]](s64)
     ; X86-LABEL: name: test_cttz35
     ; X86: [[COPY:%[0-9]+]]:_(s64) = COPY $rdx
-    ; X86-NEXT: [[TRUNC:%[0-9]+]]:_(s35) = G_TRUNC [[COPY]](s64)
-    ; X86-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s35)
-    ; X86-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 34359738368
-    ; X86-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[ANYEXT]], [[C]]
-    ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR]](s64)
-    ; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s32), [[C1]]
-    ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s64) = G_CTTZ_ZERO_UNDEF [[UV1]](s32)
-    ; X86-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CTTZ_ZERO_UNDEF]](s64)
-    ; X86-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
-    ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV2]], [[UV4]]
-    ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV3]], [[UV5]], [[UADDO1]]
-    ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; X86-NEXT: [[CTTZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[UV]](s32)
-    ; X86-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[CTTZ_ZERO_UNDEF1]](s32)
-    ; X86-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
-    ; X86-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
-    ; X86-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
-    ; X86-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ZEXT1]](s32), [[UV6]], [[UV8]]
-    ; X86-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ZEXT1]](s32), [[UV7]], [[UV9]]
-    ; X86-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[SELECT]](s32), [[SELECT1]](s32)
-    ; X86-NEXT: [[TRUNC1:%[0-9]+]]:_(s35) = G_TRUNC [[MV1]](s64)
-    ; X86-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC1]](s35)
-    ; X86-NEXT: RET 0, implicit [[ZEXT2]](s64)
+    ; X86-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; X86-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV]], [[C]]
+    ; X86-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV1]], [[C1]]
+    ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(eq), [[OR]](s32), [[C]]
+    ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[OR1]](s32)
+    ; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[CTTZ_ZERO_UNDEF]], [[C2]]
+    ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[C]], [[C]], [[UADDO1]]
+    ; X86-NEXT: [[CTTZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[OR]](s32)
+    ; X86-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; X86-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8)
+    ; X86-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
+    ; X86-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[UADDO]], [[CTTZ_ZERO_UNDEF1]]
+    ; X86-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[UADDE]], [[C]]
+    ; X86-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; X86-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+    ; X86-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; X86-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SELECT1]], [[C5]]
+    ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND1]](s32), [[AND2]](s32)
+    ; X86-NEXT: RET 0, implicit [[MV]](s64)
     %0(s64) = COPY $rdx
     %1:_(s35) = G_TRUNC %0(s64)
     %2:_(s35) = G_CTTZ %1
@@ -99,23 +92,19 @@ body:             |
     ; X86: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
     ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s64)
     ; X86-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s32), [[C]]
-    ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s64) = G_CTTZ_ZERO_UNDEF [[UV1]](s32)
-    ; X86-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CTTZ_ZERO_UNDEF]](s64)
-    ; X86-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
-    ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV2]], [[UV4]]
-    ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV3]], [[UV5]], [[UADDO1]]
-    ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+    ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(eq), [[UV]](s32), [[C]]
+    ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[UV1]](s32)
+    ; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[CTTZ_ZERO_UNDEF]], [[C1]]
+    ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[C]], [[C]], [[UADDO1]]
     ; X86-NEXT: [[CTTZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[UV]](s32)
-    ; X86-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[CTTZ_ZERO_UNDEF1]](s32)
-    ; X86-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
-    ; X86-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
-    ; X86-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
-    ; X86-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ZEXT1]](s32), [[UV6]], [[UV8]]
-    ; X86-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ZEXT1]](s32), [[UV7]], [[UV9]]
-    ; X86-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[SELECT]](s32), [[SELECT1]](s32)
-    ; X86-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[MV1]](s64)
+    ; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; X86-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8)
+    ; X86-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
+    ; X86-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[UADDO]], [[CTTZ_ZERO_UNDEF1]]
+    ; X86-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[UADDE]], [[C]]
+    ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[SELECT]](s32), [[SELECT1]](s32)
+    ; X86-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
     ; X86-NEXT: RET 0, implicit [[COPY]](s64)
     %0:_(s64) = IMPLICIT_DEF
     %1:_(s64) = G_CTTZ_ZERO_UNDEF %0

diff --git a/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros.mir b/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros.mir
index 5cddfe5b3d45c..9459b2ff19dac 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/legalize-trailing-zeros.mir
@@ -1,10 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
 # RUN: llc -mtriple=x86_64-linux-gnu -mattr=+bmi -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=CHECK,X64
-# RUN: llc -mtriple=i386-linux-gnu -mattr=+bmi -run-pass=legalizer -global-isel-abort=2 -pass-remarks-missed='gisel*'  %s 2>%t -o - | FileCheck %s --check-prefixes=CHECK,X86
-# RUN: FileCheck -check-prefix=ERR32  %s < %t
-
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %28:_(s32), %29:_(s1) = G_UADDO %24:_, %26:_ (in function: test_cttz35)
-# ERR32: remark: <unknown>:0:0: unable to legalize instruction: %23:_(s32), %24:_(s1) = G_UADDO %19:_, %21:_ (in function: test_cttz64)
+# RUN: llc -mtriple=i386-linux-gnu -mattr=+bmi -run-pass=legalizer %s -o - | FileCheck %s --check-prefixes=CHECK,X86
 
 # test count trailing zeros for s16, s32, and s64
 
@@ -27,31 +23,28 @@ body:             |
     ; X64-NEXT: RET 0, implicit [[AND]](s64)
     ; X86-LABEL: name: test_cttz35
     ; X86: [[COPY:%[0-9]+]]:_(s64) = COPY $rdx
-    ; X86-NEXT: [[TRUNC:%[0-9]+]]:_(s35) = G_TRUNC [[COPY]](s64)
-    ; X86-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[TRUNC]](s35)
-    ; X86-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 34359738368
-    ; X86-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[ANYEXT]], [[C]]
-    ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[OR]](s64)
-    ; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s32), [[C1]]
-    ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s64) = G_CTTZ_ZERO_UNDEF [[UV1]](s32)
-    ; X86-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CTTZ_ZERO_UNDEF]](s64)
-    ; X86-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
-    ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV2]], [[UV4]]
-    ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV3]], [[UV5]], [[UADDO1]]
-    ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
-    ; X86-NEXT: [[CTTZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[UV]](s32)
-    ; X86-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[CTTZ_ZERO_UNDEF1]](s32)
-    ; X86-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
-    ; X86-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
-    ; X86-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
-    ; X86-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ZEXT1]](s32), [[UV6]], [[UV8]]
-    ; X86-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ZEXT1]](s32), [[UV7]], [[UV9]]
-    ; X86-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[SELECT]](s32), [[SELECT1]](s32)
-    ; X86-NEXT: [[TRUNC1:%[0-9]+]]:_(s35) = G_TRUNC [[MV1]](s64)
-    ; X86-NEXT: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC1]](s35)
-    ; X86-NEXT: RET 0, implicit [[ZEXT2]](s64)
+    ; X86-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+    ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+    ; X86-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV]], [[C]]
+    ; X86-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[UV1]], [[C1]]
+    ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(eq), [[OR]](s32), [[C]]
+    ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[OR1]](s32)
+    ; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[CTTZ_ZERO_UNDEF]], [[C2]]
+    ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[C]], [[C]], [[UADDO1]]
+    ; X86-NEXT: [[CTTZ_ZERO_UNDEF1:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[OR]](s32)
+    ; X86-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; X86-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8)
+    ; X86-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
+    ; X86-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[UADDO]], [[CTTZ_ZERO_UNDEF1]]
+    ; X86-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[UADDE]], [[C]]
+    ; X86-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; X86-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
+    ; X86-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; X86-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SELECT1]], [[C5]]
+    ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[AND1]](s32), [[AND2]](s32)
+    ; X86-NEXT: RET 0, implicit [[MV]](s64)
     %0(s64) = COPY $rdx
     %1:_(s35) = G_TRUNC %0(s64)
     %2:_(s35) = G_CTTZ %1
@@ -101,23 +94,19 @@ body:             |
     ; X86: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
     ; X86-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](s64)
     ; X86-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV]](s32), [[C]]
-    ; X86-NEXT: [[CTTZ:%[0-9]+]]:_(s64) = G_CTTZ [[UV1]](s32)
-    ; X86-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
-    ; X86-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CTTZ]](s64)
-    ; X86-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
-    ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV2]], [[UV4]]
-    ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV3]], [[UV5]], [[UADDO1]]
-    ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+    ; X86-NEXT: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(eq), [[UV]](s32), [[C]]
+    ; X86-NEXT: [[CTTZ:%[0-9]+]]:_(s32) = G_CTTZ [[UV1]](s32)
+    ; X86-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+    ; X86-NEXT: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[CTTZ]], [[C1]]
+    ; X86-NEXT: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[C]], [[C]], [[UADDO1]]
     ; X86-NEXT: [[CTTZ_ZERO_UNDEF:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[UV]](s32)
-    ; X86-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[CTTZ_ZERO_UNDEF]](s32)
-    ; X86-NEXT: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
-    ; X86-NEXT: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
-    ; X86-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
-    ; X86-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ZEXT1]](s32), [[UV6]], [[UV8]]
-    ; X86-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ZEXT1]](s32), [[UV7]], [[UV9]]
-    ; X86-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[SELECT]](s32), [[SELECT1]](s32)
-    ; X86-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[MV1]](s64)
+    ; X86-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; X86-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8)
+    ; X86-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
+    ; X86-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[UADDO]], [[CTTZ_ZERO_UNDEF]]
+    ; X86-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[AND]](s32), [[UADDE]], [[C]]
+    ; X86-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[SELECT]](s32), [[SELECT1]](s32)
+    ; X86-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY [[MV]](s64)
     ; X86-NEXT: RET 0, implicit [[COPY]](s64)
     %0:_(s64) = IMPLICIT_DEF
     %1:_(s64) = G_CTTZ %0

diff  --git a/llvm/test/CodeGen/X86/GlobalISel/sub-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/sub-scalar.ll
index 982567c9e8c11..94ef4d201d4a1 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/sub-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/sub-scalar.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
-; RUN: llc -mtriple=i386-linux-gnu   -global-isel -global-isel-abort=2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X86
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i386-linux-gnu   -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X86
 
 define i128 @test_sub_i128(i128 %arg1, i128 %arg2) nounwind {
 ; X64-LABEL: test_sub_i128:
@@ -20,17 +20,17 @@ define i128 @test_sub_i128(i128 %arg1, i128 %arg2) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    subl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    subl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl %ecx, 8(%eax)
-; X86-NEXT:    movl %edi, 4(%eax)
-; X86-NEXT:    movl %esi, (%eax)
-; X86-NEXT:    movl %edx, 12(%eax)
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    sbbl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %ecx, (%eax)
+; X86-NEXT:    movl %edx, 4(%eax)
+; X86-NEXT:    movl %esi, 8(%eax)
+; X86-NEXT:    movl %edi, 12(%eax)
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    popl %edi
-; X86-NEXT:    retl $4
+; X86-NEXT:    retl
   %ret = sub i128 %arg1, %arg2
   ret i128 %ret
 }


        


More information about the llvm-commits mailing list