[llvm] r363284 - [Codegen] Merge tail blocks with no successors after block placement

David Bolvansky via llvm-commits llvm-commits@lists.llvm.org
Thu Jun 13 11:11:33 PDT 2019


Author: xbolva00
Date: Thu Jun 13 11:11:32 2019
New Revision: 363284

URL: http://llvm.org/viewvc/llvm-project?rev=363284&view=rev
Log:
[Codegen] Merge tail blocks with no successors after block placement

Summary:
I found the following case, in which tail blocks with no successors gain merging opportunities after block placement.

Before block placement:

bb0:
    ...
    bne a0, 0, bb2

bb1:
    mv a0, 1
    ret 

bb2:
    ...

bb3:
    mv a0, 1
    ret

bb4:
    mv a0, -1
    ret

The conditional branch bne in bb0 is the inverse of beq; block placement flips it when reordering the blocks.

After block placement:

bb0:
    ...
    beq a0, 0, bb1

bb2:
    ...

bb4:
    mv a0, -1
    ret

bb1:
    mv a0, 1
    ret

bb3:
    mv a0, 1
    ret

After block placement, a new tail merging opportunity appears: bb1 and bb3 can be merged into a single block. Therefore, the restriction that tail blocks with no successors are only collected before block placement should be removed. In my experiments on RISC-V, this decreases code size.
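
For reference, after this change the start of BranchFolder::TailMergeBlocks collects every block with no successors on both the pre- and post-placement runs. A simplified excerpt of the patched function (taken from the diff below; not standalone code):

    bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
      bool MadeChange = false;
      if (!EnableTailMerge)
        return MadeChange;

      // First find blocks with no successors. Block placement may create new
      // tail merging opportunities for these blocks, so collect them on every run.
      MergePotentials.clear();
      for (MachineBasicBlock &MBB : MF) {
        if (MergePotentials.size() == TailMergeThreshold)
          break;
        if (!TriedMerging.count(&MBB) && MBB.succ_empty())
          MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(MBB), &MBB));
      }

      // If this is a large problem, avoid visiting the same basic blocks
      // multiple times.
      if (MergePotentials.size() == TailMergeThreshold)
        for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
          TriedMerging.insert(MergePotentials[i].getBlock());

      // See if we can do any tail merging on those.
      if (MergePotentials.size() >= 2)
        MadeChange |= TryTailMergeBlocks(nullptr, nullptr, MinCommonTailLength);

      // ... handling of blocks with multiple predecessors continues below ...
    }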


Author of original patch: Jim Lin

Reviewers: haicheng, aheejin, craig.topper, rnk, RKSimon, Jim, dmgreen

Reviewed By: Jim, dmgreen

Subscribers: xbolva00, dschuff, javed.absar, sbc100, jgravelle-google, aheejin, kito-cheng, dmgreen, PkmX, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D54411

Modified:
    llvm/trunk/lib/CodeGen/BranchFolding.cpp
    llvm/trunk/test/CodeGen/Thumb2/v8_IT_3.ll
    llvm/trunk/test/CodeGen/WinEH/wineh-noret-cleanup.ll
    llvm/trunk/test/CodeGen/X86/conditional-tailcall.ll
    llvm/trunk/test/CodeGen/X86/loop-search.ll
    llvm/trunk/test/CodeGen/X86/machine-cp.ll
    llvm/trunk/test/CodeGen/X86/mul-constant-result.ll
    llvm/trunk/test/CodeGen/X86/tail-merge-after-mbp.mir
    llvm/trunk/test/CodeGen/X86/tail-opts.ll
    llvm/trunk/test/CodeGen/X86/tail-threshold.ll
    llvm/trunk/test/CodeGen/X86/test-shrink-bug.ll

Modified: llvm/trunk/lib/CodeGen/BranchFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/BranchFolding.cpp?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/BranchFolding.cpp (original)
+++ llvm/trunk/lib/CodeGen/BranchFolding.cpp Thu Jun 13 11:11:32 2019
@@ -1070,31 +1070,29 @@ bool BranchFolder::TryTailMergeBlocks(Ma
 
 bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
   bool MadeChange = false;
-  if (!EnableTailMerge) return MadeChange;
+  if (!EnableTailMerge)
+    return MadeChange;
 
   // First find blocks with no successors.
-  // Block placement does not create new tail merging opportunities for these
-  // blocks.
-  if (!AfterBlockPlacement) {
-    MergePotentials.clear();
-    for (MachineBasicBlock &MBB : MF) {
-      if (MergePotentials.size() == TailMergeThreshold)
-        break;
-      if (!TriedMerging.count(&MBB) && MBB.succ_empty())
-        MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(MBB), &MBB));
-    }
-
-    // If this is a large problem, avoid visiting the same basic blocks
-    // multiple times.
+  // Block placement may create new tail merging opportunities for these blocks.
+  MergePotentials.clear();
+  for (MachineBasicBlock &MBB : MF) {
     if (MergePotentials.size() == TailMergeThreshold)
-      for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
-        TriedMerging.insert(MergePotentials[i].getBlock());
-
-    // See if we can do any tail merging on those.
-    if (MergePotentials.size() >= 2)
-      MadeChange |= TryTailMergeBlocks(nullptr, nullptr, MinCommonTailLength);
+      break;
+    if (!TriedMerging.count(&MBB) && MBB.succ_empty())
+      MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(MBB), &MBB));
   }
 
+  // If this is a large problem, avoid visiting the same basic blocks
+  // multiple times.
+  if (MergePotentials.size() == TailMergeThreshold)
+    for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i)
+      TriedMerging.insert(MergePotentials[i].getBlock());
+
+  // See if we can do any tail merging on those.
+  if (MergePotentials.size() >= 2)
+    MadeChange |= TryTailMergeBlocks(nullptr, nullptr, MinCommonTailLength);
+
   // Look at blocks (IBB) with multiple predecessors (PBB).
   // We change each predecessor to a canonical form, by
   // (1) temporarily removing any unconditional branch from the predecessor

Modified: llvm/trunk/test/CodeGen/Thumb2/v8_IT_3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/v8_IT_3.ll?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/v8_IT_3.ll (original)
+++ llvm/trunk/test/CodeGen/Thumb2/v8_IT_3.ll Thu Jun 13 11:11:32 2019
@@ -61,9 +61,7 @@ bb4:
 ; CHECK-PIC-NEXT: cmpeq
 ; CHECK-PIC-NEXT: beq
 ; CHECK-PIC: %bb6
-; CHECK-PIC-NEXT: movs
-; CHECK-PIC-NEXT: add
-; CHECK-PIC-NEXT: pop
+; CHECK-PIC: mov
   ret i32 0
 
 bb6:

Modified: llvm/trunk/test/CodeGen/WinEH/wineh-noret-cleanup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/WinEH/wineh-noret-cleanup.ll?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/WinEH/wineh-noret-cleanup.ll (original)
+++ llvm/trunk/test/CodeGen/WinEH/wineh-noret-cleanup.ll Thu Jun 13 11:11:32 2019
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: sed -e s/.Cxx:// %s | llc -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefix=CXX
 ; RUN: sed -e s/.Seh:// %s | llc -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefix=SEH
 
@@ -68,13 +69,13 @@ catch.body.2:
 ; SEH-NEXT:    .long   .Ltmp0@IMGREL+1
 ; SEH-NEXT:    .long   .Ltmp1@IMGREL+1
 ; SEH-NEXT:    .long   dummy_filter@IMGREL
-; SEH-NEXT:    .long   .LBB0_5@IMGREL
+; SEH-NEXT:    .long   .LBB0_2@IMGREL
 ; SEH-NEXT:    .long   .Ltmp2@IMGREL+1
 ; SEH-NEXT:    .long   .Ltmp3@IMGREL+1
-; SEH-NEXT:    .long   "?dtor$2@?0?test@4HA"@IMGREL
+; SEH-NEXT:    .long   "?dtor$5@?0?test@4HA"@IMGREL
 ; SEH-NEXT:    .long   0
 ; SEH-NEXT:    .long   .Ltmp2@IMGREL+1
 ; SEH-NEXT:    .long   .Ltmp3@IMGREL+1
 ; SEH-NEXT:    .long   dummy_filter@IMGREL
-; SEH-NEXT:    .long   .LBB0_5@IMGREL
+; SEH-NEXT:    .long   .LBB0_2@IMGREL
 ; SEH-NEXT:  .Llsda_end0:

Modified: llvm/trunk/test/CodeGen/X86/conditional-tailcall.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/conditional-tailcall.ll?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/conditional-tailcall.ll (original)
+++ llvm/trunk/test/CodeGen/X86/conditional-tailcall.ll Thu Jun 13 11:11:32 2019
@@ -149,59 +149,59 @@ define x86_thiscallcc zeroext i1 @BlockP
 ; CHECK32:       # %bb.0: # %entry
 ; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %edx # encoding: [0x8b,0x54,0x24,0x04]
 ; CHECK32-NEXT:    testb $42, %dl # encoding: [0xf6,0xc2,0x2a]
-; CHECK32-NEXT:    je .LBB2_1 # encoding: [0x74,A]
-; CHECK32-NEXT:    # fixup A - offset: 1, value: .LBB2_1-1, kind: FK_PCRel_1
-; CHECK32-NEXT:  # %bb.2: # %land.rhs
+; CHECK32-NEXT:    je .LBB2_3 # encoding: [0x74,A]
+; CHECK32-NEXT:    # fixup A - offset: 1, value: .LBB2_3-1, kind: FK_PCRel_1
+; CHECK32-NEXT:  # %bb.1: # %land.rhs
 ; CHECK32-NEXT:    movb $1, %al # encoding: [0xb0,0x01]
 ; CHECK32-NEXT:    testb $44, %dl # encoding: [0xf6,0xc2,0x2c]
 ; CHECK32-NEXT:    je baz # TAILCALL
 ; CHECK32-NEXT:    # encoding: [0x74,A]
 ; CHECK32-NEXT:    # fixup A - offset: 1, value: baz-1, kind: FK_PCRel_1
-; CHECK32-NEXT:  # %bb.3: # %land.end
+; CHECK32-NEXT:  .LBB2_2: # %land.end
 ; CHECK32-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK32-NEXT:    retl $4 # encoding: [0xc2,0x04,0x00]
-; CHECK32-NEXT:  .LBB2_1:
+; CHECK32-NEXT:  .LBB2_3:
 ; CHECK32-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK32-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK32-NEXT:    retl $4 # encoding: [0xc2,0x04,0x00]
+; CHECK32-NEXT:    jmp .LBB2_2 # encoding: [0xeb,A]
+; CHECK32-NEXT:    # fixup A - offset: 1, value: .LBB2_2-1, kind: FK_PCRel_1
 ;
 ; CHECK64-LABEL: BlockPlacementTest:
 ; CHECK64:       # %bb.0: # %entry
 ; CHECK64-NEXT:    testb $42, %sil # encoding: [0x40,0xf6,0xc6,0x2a]
-; CHECK64-NEXT:    je .LBB2_1 # encoding: [0x74,A]
-; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB2_1-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  # %bb.2: # %land.rhs
+; CHECK64-NEXT:    je .LBB2_3 # encoding: [0x74,A]
+; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB2_3-1, kind: FK_PCRel_1
+; CHECK64-NEXT:  # %bb.1: # %land.rhs
 ; CHECK64-NEXT:    movb $1, %al # encoding: [0xb0,0x01]
 ; CHECK64-NEXT:    testb $44, %sil # encoding: [0x40,0xf6,0xc6,0x2c]
 ; CHECK64-NEXT:    je baz # TAILCALL
 ; CHECK64-NEXT:    # encoding: [0x74,A]
 ; CHECK64-NEXT:    # fixup A - offset: 1, value: baz-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  # %bb.3: # %land.end
+; CHECK64-NEXT:  .LBB2_2: # %land.end
 ; CHECK64-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK64-NEXT:    retq # encoding: [0xc3]
-; CHECK64-NEXT:  .LBB2_1:
+; CHECK64-NEXT:  .LBB2_3:
 ; CHECK64-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
-; CHECK64-NEXT:    # kill: def $al killed $al killed $eax
-; CHECK64-NEXT:    retq # encoding: [0xc3]
+; CHECK64-NEXT:    jmp .LBB2_2 # encoding: [0xeb,A]
+; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB2_2-1, kind: FK_PCRel_1
 ;
 ; WIN64-LABEL: BlockPlacementTest:
 ; WIN64:       # %bb.0: # %entry
 ; WIN64-NEXT:    testb $42, %dl # encoding: [0xf6,0xc2,0x2a]
-; WIN64-NEXT:    je .LBB2_1 # encoding: [0x74,A]
-; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB2_1-1, kind: FK_PCRel_1
-; WIN64-NEXT:  # %bb.2: # %land.rhs
+; WIN64-NEXT:    je .LBB2_3 # encoding: [0x74,A]
+; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB2_3-1, kind: FK_PCRel_1
+; WIN64-NEXT:  # %bb.1: # %land.rhs
 ; WIN64-NEXT:    movb $1, %al # encoding: [0xb0,0x01]
 ; WIN64-NEXT:    testb $44, %dl # encoding: [0xf6,0xc2,0x2c]
 ; WIN64-NEXT:    je baz # TAILCALL
 ; WIN64-NEXT:    # encoding: [0x74,A]
 ; WIN64-NEXT:    # fixup A - offset: 1, value: baz-1, kind: FK_PCRel_1
-; WIN64-NEXT:  # %bb.3: # %land.end
+; WIN64-NEXT:  .LBB2_2: # %land.end
 ; WIN64-NEXT:    # kill: def $al killed $al killed $eax
 ; WIN64-NEXT:    retq # encoding: [0xc3]
-; WIN64-NEXT:  .LBB2_1:
+; WIN64-NEXT:  .LBB2_3:
 ; WIN64-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
-; WIN64-NEXT:    # kill: def $al killed $al killed $eax
-; WIN64-NEXT:    retq # encoding: [0xc3]
+; WIN64-NEXT:    jmp .LBB2_2 # encoding: [0xeb,A]
+; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB2_2-1, kind: FK_PCRel_1
 entry:
   %and = and i32 %x, 42
   %tobool = icmp eq i32 %and, 0
@@ -369,56 +369,56 @@ define zeroext i1 @pr31257(%"class.std::
 ; CHECK64-NEXT:    .cfi_adjust_cfa_offset 8
 ; CHECK64-NEXT:    popq %r8 # encoding: [0x41,0x58]
 ; CHECK64-NEXT:    .cfi_adjust_cfa_offset -8
-; CHECK64-NEXT:    jmp .LBB3_1 # encoding: [0xeb,A]
-; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_1-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  .LBB3_2: # %for.body
-; CHECK64-NEXT:    # in Loop: Header=BB3_1 Depth=1
-; CHECK64-NEXT:    cmpl $2, %ecx # encoding: [0x83,0xf9,0x02]
-; CHECK64-NEXT:    je .LBB3_11 # encoding: [0x74,A]
+; CHECK64-NEXT:    jmp .LBB3_11 # encoding: [0xeb,A]
 ; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_11-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  # %bb.3: # %for.body
-; CHECK64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; CHECK64-NEXT:  .LBB3_1: # %for.body
+; CHECK64-NEXT:    # in Loop: Header=BB3_11 Depth=1
+; CHECK64-NEXT:    cmpl $2, %ecx # encoding: [0x83,0xf9,0x02]
+; CHECK64-NEXT:    je .LBB3_9 # encoding: [0x74,A]
+; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_9-1, kind: FK_PCRel_1
+; CHECK64-NEXT:  # %bb.2: # %for.body
+; CHECK64-NEXT:    # in Loop: Header=BB3_11 Depth=1
 ; CHECK64-NEXT:    cmpl $1, %ecx # encoding: [0x83,0xf9,0x01]
-; CHECK64-NEXT:    je .LBB3_10 # encoding: [0x74,A]
-; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_10-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  # %bb.4: # %for.body
-; CHECK64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; CHECK64-NEXT:    je .LBB3_7 # encoding: [0x74,A]
+; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_7-1, kind: FK_PCRel_1
+; CHECK64-NEXT:  # %bb.3: # %for.body
+; CHECK64-NEXT:    # in Loop: Header=BB3_11 Depth=1
 ; CHECK64-NEXT:    testl %ecx, %ecx # encoding: [0x85,0xc9]
-; CHECK64-NEXT:    jne .LBB3_12 # encoding: [0x75,A]
-; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_12-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  # %bb.5: # %sw.bb
-; CHECK64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; CHECK64-NEXT:    jne .LBB3_10 # encoding: [0x75,A]
+; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_10-1, kind: FK_PCRel_1
+; CHECK64-NEXT:  # %bb.4: # %sw.bb
+; CHECK64-NEXT:    # in Loop: Header=BB3_11 Depth=1
 ; CHECK64-NEXT:    movzbl (%rdi), %edx # encoding: [0x0f,0xb6,0x17]
 ; CHECK64-NEXT:    cmpl $43, %edx # encoding: [0x83,0xfa,0x2b]
 ; CHECK64-NEXT:    movl %r8d, %ecx # encoding: [0x44,0x89,0xc1]
-; CHECK64-NEXT:    je .LBB3_12 # encoding: [0x74,A]
-; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_12-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  # %bb.6: # %sw.bb
-; CHECK64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; CHECK64-NEXT:    je .LBB3_10 # encoding: [0x74,A]
+; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_10-1, kind: FK_PCRel_1
+; CHECK64-NEXT:  # %bb.5: # %sw.bb
+; CHECK64-NEXT:    # in Loop: Header=BB3_11 Depth=1
 ; CHECK64-NEXT:    cmpb $45, %dl # encoding: [0x80,0xfa,0x2d]
 ; CHECK64-NEXT:    movl %r8d, %ecx # encoding: [0x44,0x89,0xc1]
-; CHECK64-NEXT:    je .LBB3_12 # encoding: [0x74,A]
-; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_12-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  # %bb.7: # %if.else
-; CHECK64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; CHECK64-NEXT:    je .LBB3_10 # encoding: [0x74,A]
+; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_10-1, kind: FK_PCRel_1
+; CHECK64-NEXT:  # %bb.6: # %if.else
+; CHECK64-NEXT:    # in Loop: Header=BB3_11 Depth=1
 ; CHECK64-NEXT:    addl $-48, %edx # encoding: [0x83,0xc2,0xd0]
 ; CHECK64-NEXT:    cmpl $10, %edx # encoding: [0x83,0xfa,0x0a]
 ; CHECK64-NEXT:    jmp .LBB3_8 # encoding: [0xeb,A]
 ; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_8-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  .LBB3_10: # %sw.bb14
-; CHECK64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; CHECK64-NEXT:  .LBB3_7: # %sw.bb14
+; CHECK64-NEXT:    # in Loop: Header=BB3_11 Depth=1
 ; CHECK64-NEXT:    movzbl (%rdi), %ecx # encoding: [0x0f,0xb6,0x0f]
 ; CHECK64-NEXT:    addl $-48, %ecx # encoding: [0x83,0xc1,0xd0]
 ; CHECK64-NEXT:    cmpl $10, %ecx # encoding: [0x83,0xf9,0x0a]
 ; CHECK64-NEXT:  .LBB3_8: # %if.else
-; CHECK64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; CHECK64-NEXT:    # in Loop: Header=BB3_11 Depth=1
 ; CHECK64-NEXT:    movl %r9d, %ecx # encoding: [0x44,0x89,0xc9]
-; CHECK64-NEXT:    jb .LBB3_12 # encoding: [0x72,A]
-; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_12-1, kind: FK_PCRel_1
-; CHECK64-NEXT:    jmp .LBB3_9 # encoding: [0xeb,A]
-; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_9-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  .LBB3_11: # %sw.bb22
-; CHECK64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; CHECK64-NEXT:    jb .LBB3_10 # encoding: [0x72,A]
+; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_10-1, kind: FK_PCRel_1
+; CHECK64-NEXT:    jmp .LBB3_13 # encoding: [0xeb,A]
+; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_13-1, kind: FK_PCRel_1
+; CHECK64-NEXT:  .LBB3_9: # %sw.bb22
+; CHECK64-NEXT:    # in Loop: Header=BB3_11 Depth=1
 ; CHECK64-NEXT:    movzbl (%rdi), %ecx # encoding: [0x0f,0xb6,0x0f]
 ; CHECK64-NEXT:    addl $-48, %ecx # encoding: [0x83,0xc1,0xd0]
 ; CHECK64-NEXT:    cmpl $10, %ecx # encoding: [0x83,0xf9,0x0a]
@@ -426,21 +426,21 @@ define zeroext i1 @pr31257(%"class.std::
 ; CHECK64-NEXT:    jae _Z20isValidIntegerSuffixN9__gnu_cxx17__normal_iteratorIPKcSsEES3_ # TAILCALL
 ; CHECK64-NEXT:    # encoding: [0x73,A]
 ; CHECK64-NEXT:    # fixup A - offset: 1, value: _Z20isValidIntegerSuffixN9__gnu_cxx17__normal_iteratorIPKcSsEES3_-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  .LBB3_12: # %for.inc
-; CHECK64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; CHECK64-NEXT:  .LBB3_10: # %for.inc
+; CHECK64-NEXT:    # in Loop: Header=BB3_11 Depth=1
 ; CHECK64-NEXT:    incq %rdi # encoding: [0x48,0xff,0xc7]
 ; CHECK64-NEXT:    decq %rax # encoding: [0x48,0xff,0xc8]
-; CHECK64-NEXT:  .LBB3_1: # %for.cond
+; CHECK64-NEXT:  .LBB3_11: # %for.cond
 ; CHECK64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK64-NEXT:    testq %rax, %rax # encoding: [0x48,0x85,0xc0]
-; CHECK64-NEXT:    jne .LBB3_2 # encoding: [0x75,A]
-; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_2-1, kind: FK_PCRel_1
-; CHECK64-NEXT:  # %bb.13:
+; CHECK64-NEXT:    jne .LBB3_1 # encoding: [0x75,A]
+; CHECK64-NEXT:    # fixup A - offset: 1, value: .LBB3_1-1, kind: FK_PCRel_1
+; CHECK64-NEXT:  # %bb.12:
 ; CHECK64-NEXT:    cmpl $2, %ecx # encoding: [0x83,0xf9,0x02]
 ; CHECK64-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; CHECK64-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK64-NEXT:    retq # encoding: [0xc3]
-; CHECK64-NEXT:  .LBB3_9:
+; CHECK64-NEXT:  .LBB3_13:
 ; CHECK64-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; CHECK64-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK64-NEXT:    retq # encoding: [0xc3]
@@ -451,51 +451,51 @@ define zeroext i1 @pr31257(%"class.std::
 ; WIN64-NEXT:    movq -24(%rcx), %r8 # encoding: [0x4c,0x8b,0x41,0xe8]
 ; WIN64-NEXT:    leaq (%rcx,%r8), %rdx # encoding: [0x4a,0x8d,0x14,0x01]
 ; WIN64-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
-; WIN64-NEXT:    jmp .LBB3_1 # encoding: [0xeb,A]
-; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_1-1, kind: FK_PCRel_1
-; WIN64-NEXT:  .LBB3_2: # %for.body
-; WIN64-NEXT:    # in Loop: Header=BB3_1 Depth=1
-; WIN64-NEXT:    cmpl $2, %eax # encoding: [0x83,0xf8,0x02]
-; WIN64-NEXT:    je .LBB3_10 # encoding: [0x74,A]
+; WIN64-NEXT:    jmp .LBB3_10 # encoding: [0xeb,A]
 ; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_10-1, kind: FK_PCRel_1
-; WIN64-NEXT:  # %bb.3: # %for.body
-; WIN64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; WIN64-NEXT:  .LBB3_1: # %for.body
+; WIN64-NEXT:    # in Loop: Header=BB3_10 Depth=1
+; WIN64-NEXT:    cmpl $2, %eax # encoding: [0x83,0xf8,0x02]
+; WIN64-NEXT:    je .LBB3_8 # encoding: [0x74,A]
+; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_8-1, kind: FK_PCRel_1
+; WIN64-NEXT:  # %bb.2: # %for.body
+; WIN64-NEXT:    # in Loop: Header=BB3_10 Depth=1
 ; WIN64-NEXT:    cmpl $1, %eax # encoding: [0x83,0xf8,0x01]
-; WIN64-NEXT:    je .LBB3_9 # encoding: [0x74,A]
-; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_9-1, kind: FK_PCRel_1
-; WIN64-NEXT:  # %bb.4: # %for.body
-; WIN64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; WIN64-NEXT:    je .LBB3_6 # encoding: [0x74,A]
+; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_6-1, kind: FK_PCRel_1
+; WIN64-NEXT:  # %bb.3: # %for.body
+; WIN64-NEXT:    # in Loop: Header=BB3_10 Depth=1
 ; WIN64-NEXT:    testl %eax, %eax # encoding: [0x85,0xc0]
-; WIN64-NEXT:    jne .LBB3_11 # encoding: [0x75,A]
-; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_11-1, kind: FK_PCRel_1
-; WIN64-NEXT:  # %bb.5: # %sw.bb
-; WIN64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; WIN64-NEXT:    jne .LBB3_9 # encoding: [0x75,A]
+; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_9-1, kind: FK_PCRel_1
+; WIN64-NEXT:  # %bb.4: # %sw.bb
+; WIN64-NEXT:    # in Loop: Header=BB3_10 Depth=1
 ; WIN64-NEXT:    movzbl (%rcx), %r9d # encoding: [0x44,0x0f,0xb6,0x09]
 ; WIN64-NEXT:    cmpl $43, %r9d # encoding: [0x41,0x83,0xf9,0x2b]
 ; WIN64-NEXT:    movl $1, %eax # encoding: [0xb8,0x01,0x00,0x00,0x00]
-; WIN64-NEXT:    je .LBB3_11 # encoding: [0x74,A]
-; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_11-1, kind: FK_PCRel_1
-; WIN64-NEXT:  # %bb.6: # %sw.bb
-; WIN64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; WIN64-NEXT:    je .LBB3_9 # encoding: [0x74,A]
+; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_9-1, kind: FK_PCRel_1
+; WIN64-NEXT:  # %bb.5: # %sw.bb
+; WIN64-NEXT:    # in Loop: Header=BB3_10 Depth=1
 ; WIN64-NEXT:    cmpb $45, %r9b # encoding: [0x41,0x80,0xf9,0x2d]
-; WIN64-NEXT:    je .LBB3_11 # encoding: [0x74,A]
-; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_11-1, kind: FK_PCRel_1
+; WIN64-NEXT:    je .LBB3_9 # encoding: [0x74,A]
+; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_9-1, kind: FK_PCRel_1
 ; WIN64-NEXT:    jmp .LBB3_7 # encoding: [0xeb,A]
 ; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_7-1, kind: FK_PCRel_1
-; WIN64-NEXT:  .LBB3_9: # %sw.bb14
-; WIN64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; WIN64-NEXT:  .LBB3_6: # %sw.bb14
+; WIN64-NEXT:    # in Loop: Header=BB3_10 Depth=1
 ; WIN64-NEXT:    movzbl (%rcx), %r9d # encoding: [0x44,0x0f,0xb6,0x09]
 ; WIN64-NEXT:  .LBB3_7: # %if.else
-; WIN64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; WIN64-NEXT:    # in Loop: Header=BB3_10 Depth=1
 ; WIN64-NEXT:    addl $-48, %r9d # encoding: [0x41,0x83,0xc1,0xd0]
 ; WIN64-NEXT:    movl $2, %eax # encoding: [0xb8,0x02,0x00,0x00,0x00]
 ; WIN64-NEXT:    cmpl $10, %r9d # encoding: [0x41,0x83,0xf9,0x0a]
-; WIN64-NEXT:    jb .LBB3_11 # encoding: [0x72,A]
-; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_11-1, kind: FK_PCRel_1
-; WIN64-NEXT:    jmp .LBB3_8 # encoding: [0xeb,A]
-; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_8-1, kind: FK_PCRel_1
-; WIN64-NEXT:  .LBB3_10: # %sw.bb22
-; WIN64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; WIN64-NEXT:    jb .LBB3_9 # encoding: [0x72,A]
+; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_9-1, kind: FK_PCRel_1
+; WIN64-NEXT:    jmp .LBB3_12 # encoding: [0xeb,A]
+; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_12-1, kind: FK_PCRel_1
+; WIN64-NEXT:  .LBB3_8: # %sw.bb22
+; WIN64-NEXT:    # in Loop: Header=BB3_10 Depth=1
 ; WIN64-NEXT:    movzbl (%rcx), %r9d # encoding: [0x44,0x0f,0xb6,0x09]
 ; WIN64-NEXT:    addl $-48, %r9d # encoding: [0x41,0x83,0xc1,0xd0]
 ; WIN64-NEXT:    movl $2, %eax # encoding: [0xb8,0x02,0x00,0x00,0x00]
@@ -503,21 +503,21 @@ define zeroext i1 @pr31257(%"class.std::
 ; WIN64-NEXT:    jae _Z20isValidIntegerSuffixN9__gnu_cxx17__normal_iteratorIPKcSsEES3_ # TAILCALL
 ; WIN64-NEXT:    # encoding: [0x73,A]
 ; WIN64-NEXT:    # fixup A - offset: 1, value: _Z20isValidIntegerSuffixN9__gnu_cxx17__normal_iteratorIPKcSsEES3_-1, kind: FK_PCRel_1
-; WIN64-NEXT:  .LBB3_11: # %for.inc
-; WIN64-NEXT:    # in Loop: Header=BB3_1 Depth=1
+; WIN64-NEXT:  .LBB3_9: # %for.inc
+; WIN64-NEXT:    # in Loop: Header=BB3_10 Depth=1
 ; WIN64-NEXT:    incq %rcx # encoding: [0x48,0xff,0xc1]
 ; WIN64-NEXT:    decq %r8 # encoding: [0x49,0xff,0xc8]
-; WIN64-NEXT:  .LBB3_1: # %for.cond
+; WIN64-NEXT:  .LBB3_10: # %for.cond
 ; WIN64-NEXT:    # =>This Inner Loop Header: Depth=1
 ; WIN64-NEXT:    testq %r8, %r8 # encoding: [0x4d,0x85,0xc0]
-; WIN64-NEXT:    jne .LBB3_2 # encoding: [0x75,A]
-; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_2-1, kind: FK_PCRel_1
-; WIN64-NEXT:  # %bb.12:
+; WIN64-NEXT:    jne .LBB3_1 # encoding: [0x75,A]
+; WIN64-NEXT:    # fixup A - offset: 1, value: .LBB3_1-1, kind: FK_PCRel_1
+; WIN64-NEXT:  # %bb.11:
 ; WIN64-NEXT:    cmpl $2, %eax # encoding: [0x83,0xf8,0x02]
 ; WIN64-NEXT:    sete %al # encoding: [0x0f,0x94,0xc0]
 ; WIN64-NEXT:    # kill: def $al killed $al killed $eax
 ; WIN64-NEXT:    retq # encoding: [0xc3]
-; WIN64-NEXT:  .LBB3_8:
+; WIN64-NEXT:  .LBB3_12:
 ; WIN64-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
 ; WIN64-NEXT:    # kill: def $al killed $al killed $eax
 ; WIN64-NEXT:    retq # encoding: [0xc3]

Modified: llvm/trunk/test/CodeGen/X86/loop-search.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/loop-search.ll?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/loop-search.ll (original)
+++ llvm/trunk/test/CodeGen/X86/loop-search.ll Thu Jun 13 11:11:32 2019
@@ -8,26 +8,21 @@ define zeroext i1 @search(i32 %needle, i
 ; CHECK-LABEL: search:
 ; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    testl %edx, %edx
-; CHECK-NEXT:    jle LBB0_1
-; CHECK-NEXT:  ## %bb.4: ## %for.body.preheader
+; CHECK-NEXT:    jle LBB0_5
+; CHECK-NEXT:  ## %bb.1: ## %for.body.preheader
 ; CHECK-NEXT:    movslq %edx, %rax
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    .p2align 4, 0x90
-; CHECK-NEXT:  LBB0_5: ## %for.body
+; CHECK-NEXT:  LBB0_2: ## %for.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    cmpl %edi, (%rsi,%rcx,4)
 ; CHECK-NEXT:    je LBB0_6
-; CHECK-NEXT:  ## %bb.2: ## %for.cond
-; CHECK-NEXT:    ## in Loop: Header=BB0_5 Depth=1
+; CHECK-NEXT:  ## %bb.3: ## %for.cond
+; CHECK-NEXT:    ## in Loop: Header=BB0_2 Depth=1
 ; CHECK-NEXT:    incq %rcx
 ; CHECK-NEXT:    cmpq %rax, %rcx
-; CHECK-NEXT:    jl LBB0_5
-;            ### FIXME: %bb.3 and LBB0_1 should be merged
-; CHECK-NEXT:  ## %bb.3:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    ## kill: def $al killed $al killed $eax
-; CHECK-NEXT:    retq
-; CHECK-NEXT:  LBB0_1:
+; CHECK-NEXT:    jl LBB0_2
+; CHECK-NEXT:  LBB0_5:
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    ## kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/machine-cp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/machine-cp.ll?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/machine-cp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/machine-cp.ll Thu Jun 13 11:11:32 2019
@@ -8,22 +8,21 @@ define i32 @t1(i32 %a, i32 %b) nounwind
 ; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    testl %esi, %esi
-; CHECK-NEXT:    je LBB0_1
-; CHECK-NEXT:  ## %bb.2: ## %while.body.preheader
+; CHECK-NEXT:    je LBB0_4
+; CHECK-NEXT:  ## %bb.1: ## %while.body.preheader
 ; CHECK-NEXT:    movl %esi, %edx
 ; CHECK-NEXT:    .p2align 4, 0x90
-; CHECK-NEXT:  LBB0_3: ## %while.body
+; CHECK-NEXT:  LBB0_2: ## %while.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movl %edx, %ecx
 ; CHECK-NEXT:    cltd
 ; CHECK-NEXT:    idivl %ecx
 ; CHECK-NEXT:    testl %edx, %edx
 ; CHECK-NEXT:    movl %ecx, %eax
-; CHECK-NEXT:    jne LBB0_3
-; CHECK-NEXT:  ## %bb.4: ## %while.end
+; CHECK-NEXT:    jne LBB0_2
+; CHECK-NEXT:  ## %bb.3: ## %while.end
 ; CHECK-NEXT:    movl %ecx, %eax
-; CHECK-NEXT:    retq
-; CHECK-NEXT:  LBB0_1:
+; CHECK-NEXT:  LBB0_4:
 ; CHECK-NEXT:    retq
 entry:
   %cmp1 = icmp eq i32 %b, 0
@@ -60,22 +59,21 @@ define i32 @t3(i64 %a, i64 %b) nounwind
 ; CHECK:       ## %bb.0: ## %entry
 ; CHECK-NEXT:    movq %rdi, %rax
 ; CHECK-NEXT:    testq %rsi, %rsi
-; CHECK-NEXT:    je LBB2_1
-; CHECK-NEXT:  ## %bb.2: ## %while.body.preheader
+; CHECK-NEXT:    je LBB2_4
+; CHECK-NEXT:  ## %bb.1: ## %while.body.preheader
 ; CHECK-NEXT:    movq %rsi, %rdx
 ; CHECK-NEXT:    .p2align 4, 0x90
-; CHECK-NEXT:  LBB2_3: ## %while.body
+; CHECK-NEXT:  LBB2_2: ## %while.body
 ; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    movq %rdx, %rcx
 ; CHECK-NEXT:    cqto
 ; CHECK-NEXT:    idivq %rcx
 ; CHECK-NEXT:    testq %rdx, %rdx
 ; CHECK-NEXT:    movq %rcx, %rax
-; CHECK-NEXT:    jne LBB2_3
-; CHECK-NEXT:  ## %bb.4: ## %while.end
+; CHECK-NEXT:    jne LBB2_2
+; CHECK-NEXT:  ## %bb.3: ## %while.end
 ; CHECK-NEXT:    movl %ecx, %eax
-; CHECK-NEXT:    retq
-; CHECK-NEXT:  LBB2_1:
+; CHECK-NEXT:  LBB2_4:
 ; CHECK-NEXT:    retq
 entry:
   %cmp1 = icmp eq i64 %b, 0

Modified: llvm/trunk/test/CodeGen/X86/mul-constant-result.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/mul-constant-result.ll?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/mul-constant-result.ll (original)
+++ llvm/trunk/test/CodeGen/X86/mul-constant-result.ll Thu Jun 13 11:11:32 2019
@@ -28,7 +28,7 @@ define i32 @mult(i32, i32) local_unnamed
 ; X86-NEXT:  .LBB0_4:
 ; X86-NEXT:    decl %ecx
 ; X86-NEXT:    cmpl $31, %ecx
-; X86-NEXT:    ja .LBB0_39
+; X86-NEXT:    ja .LBB0_7
 ; X86-NEXT:  # %bb.5:
 ; X86-NEXT:    jmpl *.LJTI0_0(,%ecx,4)
 ; X86-NEXT:  .LBB0_6:
@@ -36,209 +36,149 @@ define i32 @mult(i32, i32) local_unnamed
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_39:
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    xorl %eax, %eax
-; X86-NEXT:  .LBB0_40:
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
 ; X86-NEXT:  .LBB0_7:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    leal (%eax,%eax,2), %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
+; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:  .LBB0_8:
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    shll $2, %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_9:
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    leal (%eax,%eax,4), %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
 ; X86-NEXT:  .LBB0_10:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    addl %eax, %eax
-; X86-NEXT:    leal (%eax,%eax,2), %eax
+; X86-NEXT:    shll $2, %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_11:
+; X86-NEXT:  .LBB0_12:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    leal (,%eax,8), %ecx
-; X86-NEXT:    jmp .LBB0_12
+; X86-NEXT:    addl %eax, %eax
+; X86-NEXT:    jmp .LBB0_9
 ; X86-NEXT:  .LBB0_13:
-; X86-NEXT:    shll $3, %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
+; X86-NEXT:    leal (,%eax,8), %ecx
+; X86-NEXT:    jmp .LBB0_41
 ; X86-NEXT:  .LBB0_14:
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    leal (%eax,%eax,8), %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_15:
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    addl %eax, %eax
-; X86-NEXT:    leal (%eax,%eax,4), %eax
+; X86-NEXT:    shll $3, %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
 ; X86-NEXT:  .LBB0_16:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    leal (%eax,%eax,4), %ecx
-; X86-NEXT:    leal (%eax,%ecx,2), %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
+; X86-NEXT:    addl %eax, %eax
+; X86-NEXT:    jmp .LBB0_11
 ; X86-NEXT:  .LBB0_17:
-; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    leal (%eax,%eax,4), %ecx
+; X86-NEXT:    jmp .LBB0_18
+; X86-NEXT:  .LBB0_19:
 ; X86-NEXT:    shll $2, %eax
-; X86-NEXT:    leal (%eax,%eax,2), %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_18:
-; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    jmp .LBB0_9
+; X86-NEXT:  .LBB0_20:
 ; X86-NEXT:    leal (%eax,%eax,2), %ecx
-; X86-NEXT:    leal (%eax,%ecx,4), %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_19:
-; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    jmp .LBB0_21
+; X86-NEXT:  .LBB0_22:
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    shll $4, %ecx
 ; X86-NEXT:    subl %eax, %ecx
-; X86-NEXT:    jmp .LBB0_12
-; X86-NEXT:  .LBB0_21:
+; X86-NEXT:    jmp .LBB0_41
+; X86-NEXT:  .LBB0_23:
 ; X86-NEXT:    leal (%eax,%eax,4), %eax
-; X86-NEXT:    leal (%eax,%eax,2), %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_22:
-; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    jmp .LBB0_9
+; X86-NEXT:  .LBB0_24:
 ; X86-NEXT:    shll $4, %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_23:
+; X86-NEXT:  .LBB0_25:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    shll $4, %ecx
-; X86-NEXT:    addl %ecx, %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_24:
-; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    jmp .LBB0_26
+; X86-NEXT:  .LBB0_27:
 ; X86-NEXT:    addl %eax, %eax
+; X86-NEXT:  .LBB0_15:
 ; X86-NEXT:    leal (%eax,%eax,8), %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_25:
+; X86-NEXT:  .LBB0_28:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    leal (%eax,%eax,8), %ecx
+; X86-NEXT:  .LBB0_18:
 ; X86-NEXT:    leal (%eax,%ecx,2), %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_26:
+; X86-NEXT:  .LBB0_29:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    shll $2, %eax
-; X86-NEXT:    leal (%eax,%eax,4), %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_27:
-; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    jmp .LBB0_11
+; X86-NEXT:  .LBB0_30:
 ; X86-NEXT:    leal (%eax,%eax,4), %ecx
+; X86-NEXT:  .LBB0_21:
 ; X86-NEXT:    leal (%eax,%ecx,4), %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_28:
+; X86-NEXT:  .LBB0_31:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    leal (%eax,%eax,4), %ecx
 ; X86-NEXT:    leal (%eax,%ecx,4), %ecx
-; X86-NEXT:    addl %ecx, %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_29:
-; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    jmp .LBB0_26
+; X86-NEXT:  .LBB0_32:
 ; X86-NEXT:    leal (%eax,%eax,2), %ecx
 ; X86-NEXT:    shll $3, %ecx
-; X86-NEXT:    jmp .LBB0_12
-; X86-NEXT:  .LBB0_30:
+; X86-NEXT:    jmp .LBB0_41
+; X86-NEXT:  .LBB0_33:
 ; X86-NEXT:    shll $3, %eax
-; X86-NEXT:    leal (%eax,%eax,2), %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_31:
-; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    jmp .LBB0_9
+; X86-NEXT:  .LBB0_34:
 ; X86-NEXT:    leal (%eax,%eax,4), %eax
+; X86-NEXT:  .LBB0_11:
 ; X86-NEXT:    leal (%eax,%eax,4), %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_32:
+; X86-NEXT:  .LBB0_35:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    leal (%eax,%eax,4), %ecx
 ; X86-NEXT:    leal (%ecx,%ecx,4), %ecx
-; X86-NEXT:    addl %ecx, %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_33:
-; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    jmp .LBB0_26
+; X86-NEXT:  .LBB0_36:
 ; X86-NEXT:    leal (%eax,%eax,8), %eax
+; X86-NEXT:  .LBB0_9:
 ; X86-NEXT:    leal (%eax,%eax,2), %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_34:
+; X86-NEXT:  .LBB0_37:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    leal (%eax,%eax,8), %ecx
 ; X86-NEXT:    leal (%ecx,%ecx,2), %ecx
-; X86-NEXT:    addl %ecx, %eax
-; X86-NEXT:    popl %esi
-; X86-NEXT:    .cfi_def_cfa_offset 4
-; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_35:
-; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    jmp .LBB0_26
+; X86-NEXT:  .LBB0_38:
 ; X86-NEXT:    leal (%eax,%eax,8), %ecx
 ; X86-NEXT:    leal (%ecx,%ecx,2), %ecx
 ; X86-NEXT:    addl %eax, %ecx
+; X86-NEXT:  .LBB0_26:
 ; X86-NEXT:    addl %ecx, %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_36:
+; X86-NEXT:  .LBB0_39:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    shll $5, %ecx
 ; X86-NEXT:    subl %eax, %ecx
-; X86-NEXT:    jmp .LBB0_12
-; X86-NEXT:  .LBB0_37:
+; X86-NEXT:    jmp .LBB0_41
+; X86-NEXT:  .LBB0_40:
 ; X86-NEXT:    movl %eax, %ecx
 ; X86-NEXT:    shll $5, %ecx
-; X86-NEXT:  .LBB0_12:
+; X86-NEXT:  .LBB0_41:
 ; X86-NEXT:    subl %eax, %ecx
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
-; X86-NEXT:  .LBB0_38:
+; X86-NEXT:  .LBB0_42:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    shll $5, %eax
 ; X86-NEXT:    popl %esi
@@ -256,158 +196,149 @@ define i32 @mult(i32, i32) local_unnamed
 ; X64-HSW-NEXT:    cmovel %ecx, %eax
 ; X64-HSW-NEXT:    decl %edi
 ; X64-HSW-NEXT:    cmpl $31, %edi
-; X64-HSW-NEXT:    ja .LBB0_36
+; X64-HSW-NEXT:    ja .LBB0_3
 ; X64-HSW-NEXT:  # %bb.1:
 ; X64-HSW-NEXT:    jmpq *.LJTI0_0(,%rdi,8)
 ; X64-HSW-NEXT:  .LBB0_2:
 ; X64-HSW-NEXT:    addl %eax, %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_36:
-; X64-HSW-NEXT:    xorl %eax, %eax
-; X64-HSW-NEXT:  .LBB0_37:
-; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
-; X64-HSW-NEXT:    retq
 ; X64-HSW-NEXT:  .LBB0_3:
-; X64-HSW-NEXT:    leal (%rax,%rax,2), %eax
-; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
-; X64-HSW-NEXT:    retq
+; X64-HSW-NEXT:    xorl %eax, %eax
 ; X64-HSW-NEXT:  .LBB0_4:
-; X64-HSW-NEXT:    shll $2, %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_5:
-; X64-HSW-NEXT:    leal (%rax,%rax,4), %eax
+; X64-HSW-NEXT:  .LBB0_6:
+; X64-HSW-NEXT:    shll $2, %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_6:
+; X64-HSW-NEXT:  .LBB0_8:
 ; X64-HSW-NEXT:    addl %eax, %eax
+; X64-HSW-NEXT:  .LBB0_5:
 ; X64-HSW-NEXT:    leal (%rax,%rax,2), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_7:
-; X64-HSW-NEXT:    leal (,%rax,8), %ecx
-; X64-HSW-NEXT:    jmp .LBB0_8
 ; X64-HSW-NEXT:  .LBB0_9:
-; X64-HSW-NEXT:    shll $3, %eax
-; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
-; X64-HSW-NEXT:    retq
+; X64-HSW-NEXT:    leal (,%rax,8), %ecx
+; X64-HSW-NEXT:    jmp .LBB0_37
 ; X64-HSW-NEXT:  .LBB0_10:
-; X64-HSW-NEXT:    leal (%rax,%rax,8), %eax
+; X64-HSW-NEXT:    shll $3, %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_11:
+; X64-HSW-NEXT:  .LBB0_12:
 ; X64-HSW-NEXT:    addl %eax, %eax
+; X64-HSW-NEXT:  .LBB0_7:
 ; X64-HSW-NEXT:    leal (%rax,%rax,4), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_12:
+; X64-HSW-NEXT:  .LBB0_13:
 ; X64-HSW-NEXT:    leal (%rax,%rax,4), %ecx
 ; X64-HSW-NEXT:    leal (%rax,%rcx,2), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_13:
+; X64-HSW-NEXT:  .LBB0_15:
 ; X64-HSW-NEXT:    shll $2, %eax
 ; X64-HSW-NEXT:    leal (%rax,%rax,2), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_14:
+; X64-HSW-NEXT:  .LBB0_16:
 ; X64-HSW-NEXT:    leal (%rax,%rax,2), %ecx
 ; X64-HSW-NEXT:    leal (%rax,%rcx,4), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_15:
+; X64-HSW-NEXT:  .LBB0_18:
 ; X64-HSW-NEXT:    movl %eax, %ecx
 ; X64-HSW-NEXT:    shll $4, %ecx
 ; X64-HSW-NEXT:    subl %eax, %ecx
-; X64-HSW-NEXT:    jmp .LBB0_8
-; X64-HSW-NEXT:  .LBB0_17:
+; X64-HSW-NEXT:    jmp .LBB0_37
+; X64-HSW-NEXT:  .LBB0_19:
 ; X64-HSW-NEXT:    leal (%rax,%rax,4), %eax
 ; X64-HSW-NEXT:    leal (%rax,%rax,2), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_18:
+; X64-HSW-NEXT:  .LBB0_20:
 ; X64-HSW-NEXT:    shll $4, %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_19:
+; X64-HSW-NEXT:  .LBB0_21:
 ; X64-HSW-NEXT:    movl %eax, %ecx
 ; X64-HSW-NEXT:    shll $4, %ecx
-; X64-HSW-NEXT:    jmp .LBB0_20
-; X64-HSW-NEXT:  .LBB0_21:
+; X64-HSW-NEXT:    jmp .LBB0_34
+; X64-HSW-NEXT:  .LBB0_22:
 ; X64-HSW-NEXT:    addl %eax, %eax
+; X64-HSW-NEXT:  .LBB0_11:
 ; X64-HSW-NEXT:    leal (%rax,%rax,8), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_22:
+; X64-HSW-NEXT:  .LBB0_23:
 ; X64-HSW-NEXT:    leal (%rax,%rax,8), %ecx
 ; X64-HSW-NEXT:    leal (%rax,%rcx,2), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_23:
+; X64-HSW-NEXT:  .LBB0_24:
 ; X64-HSW-NEXT:    shll $2, %eax
 ; X64-HSW-NEXT:    leal (%rax,%rax,4), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_24:
+; X64-HSW-NEXT:  .LBB0_25:
 ; X64-HSW-NEXT:    leal (%rax,%rax,4), %ecx
 ; X64-HSW-NEXT:    leal (%rax,%rcx,4), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_25:
+; X64-HSW-NEXT:  .LBB0_26:
 ; X64-HSW-NEXT:    leal (%rax,%rax,4), %ecx
 ; X64-HSW-NEXT:    leal (%rax,%rcx,4), %ecx
-; X64-HSW-NEXT:    jmp .LBB0_20
-; X64-HSW-NEXT:  .LBB0_26:
+; X64-HSW-NEXT:    jmp .LBB0_34
+; X64-HSW-NEXT:  .LBB0_27:
 ; X64-HSW-NEXT:    leal (%rax,%rax,2), %ecx
 ; X64-HSW-NEXT:    shll $3, %ecx
-; X64-HSW-NEXT:    jmp .LBB0_8
-; X64-HSW-NEXT:  .LBB0_27:
+; X64-HSW-NEXT:    jmp .LBB0_37
+; X64-HSW-NEXT:  .LBB0_28:
 ; X64-HSW-NEXT:    shll $3, %eax
 ; X64-HSW-NEXT:    leal (%rax,%rax,2), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_28:
+; X64-HSW-NEXT:  .LBB0_29:
 ; X64-HSW-NEXT:    leal (%rax,%rax,4), %eax
 ; X64-HSW-NEXT:    leal (%rax,%rax,4), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_29:
+; X64-HSW-NEXT:  .LBB0_30:
 ; X64-HSW-NEXT:    leal (%rax,%rax,4), %ecx
 ; X64-HSW-NEXT:    leal (%rcx,%rcx,4), %ecx
-; X64-HSW-NEXT:    jmp .LBB0_20
-; X64-HSW-NEXT:  .LBB0_30:
+; X64-HSW-NEXT:    jmp .LBB0_34
+; X64-HSW-NEXT:  .LBB0_31:
 ; X64-HSW-NEXT:    leal (%rax,%rax,8), %eax
 ; X64-HSW-NEXT:    leal (%rax,%rax,2), %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_31:
+; X64-HSW-NEXT:  .LBB0_32:
 ; X64-HSW-NEXT:    leal (%rax,%rax,8), %ecx
 ; X64-HSW-NEXT:    leal (%rcx,%rcx,2), %ecx
-; X64-HSW-NEXT:    jmp .LBB0_20
-; X64-HSW-NEXT:  .LBB0_32:
+; X64-HSW-NEXT:    jmp .LBB0_34
+; X64-HSW-NEXT:  .LBB0_33:
 ; X64-HSW-NEXT:    leal (%rax,%rax,8), %ecx
 ; X64-HSW-NEXT:    leal (%rcx,%rcx,2), %ecx
 ; X64-HSW-NEXT:    addl %eax, %ecx
-; X64-HSW-NEXT:  .LBB0_20:
+; X64-HSW-NEXT:  .LBB0_34:
 ; X64-HSW-NEXT:    addl %eax, %ecx
 ; X64-HSW-NEXT:    movl %ecx, %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_33:
+; X64-HSW-NEXT:  .LBB0_35:
 ; X64-HSW-NEXT:    movl %eax, %ecx
 ; X64-HSW-NEXT:    shll $5, %ecx
 ; X64-HSW-NEXT:    subl %eax, %ecx
-; X64-HSW-NEXT:    jmp .LBB0_8
-; X64-HSW-NEXT:  .LBB0_34:
+; X64-HSW-NEXT:    jmp .LBB0_37
+; X64-HSW-NEXT:  .LBB0_36:
 ; X64-HSW-NEXT:    movl %eax, %ecx
 ; X64-HSW-NEXT:    shll $5, %ecx
-; X64-HSW-NEXT:  .LBB0_8:
+; X64-HSW-NEXT:  .LBB0_37:
 ; X64-HSW-NEXT:    subl %eax, %ecx
 ; X64-HSW-NEXT:    movl %ecx, %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq
-; X64-HSW-NEXT:  .LBB0_35:
+; X64-HSW-NEXT:  .LBB0_39:
 ; X64-HSW-NEXT:    shll $5, %eax
 ; X64-HSW-NEXT:    # kill: def $eax killed $eax killed $rax
 ; X64-HSW-NEXT:    retq

Modified: llvm/trunk/test/CodeGen/X86/tail-merge-after-mbp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tail-merge-after-mbp.mir?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tail-merge-after-mbp.mir (original)
+++ llvm/trunk/test/CodeGen/X86/tail-merge-after-mbp.mir Thu Jun 13 11:11:32 2019
@@ -11,46 +11,37 @@ body:             |
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
   ; CHECK:   TEST8ri $dl, 1, implicit-def $eflags, implicit killed $edx
-  ; CHECK:   JCC_1 %bb.2, 4, implicit $eflags
-  ; CHECK: bb.1:
-  ; CHECK:   $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
-  ; CHECK:   RETQ $eax
+  ; CHECK:   JCC_1 %bb.1, 5, implicit $eflags
   ; CHECK: bb.2:
-  ; CHECK:   successors: %bb.3(0x30000000), %bb.4(0x50000000)
+  ; CHECK:   successors: %bb.1(0x30000000), %bb.3(0x50000000)
   ; CHECK:   $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
   ; CHECK:   TEST64rr $rax, $rax, implicit-def $eflags
-  ; CHECK:   JCC_1 %bb.3, 4, implicit $eflags
-  ; CHECK: bb.4:
-  ; CHECK:   successors: %bb.5(0x30000000), %bb.10(0x50000000)
+  ; CHECK:   JCC_1 %bb.1, 4, implicit $eflags
+  ; CHECK: bb.3:
+  ; CHECK:   successors: %bb.4(0x30000000), %bb.8(0x50000000)
   ; CHECK:   CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8)
-  ; CHECK:   JCC_1 %bb.10, 5, implicit $eflags
-  ; CHECK: bb.5:
-  ; CHECK:   successors: %bb.6(0x30000000), %bb.7(0x50000000)
+  ; CHECK:   JCC_1 %bb.8, 5, implicit $eflags
+  ; CHECK: bb.4:
+  ; CHECK:   successors: %bb.1(0x30000000), %bb.5(0x50000000)
   ; CHECK:   $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
   ; CHECK:   TEST64rr $rax, $rax, implicit-def $eflags
-  ; CHECK:   JCC_1 %bb.6, 4, implicit $eflags
-  ; CHECK: bb.7 (align 4):
-  ; CHECK:   successors: %bb.8(0x71555555), %bb.10(0x0eaaaaab)
+  ; CHECK:   JCC_1 %bb.1, 4, implicit $eflags
+  ; CHECK: bb.5 (align 4):
+  ; CHECK:   successors: %bb.6(0x71555555), %bb.8(0x0eaaaaab)
   ; CHECK:   CMP64mi8 killed $rax, 1, $noreg, 8, $noreg, 0, implicit-def $eflags :: (load 8), (load 8)
-  ; CHECK:   JCC_1 %bb.10, 5, implicit $eflags
-  ; CHECK: bb.8:
-  ; CHECK:   successors: %bb.9(0x04000000), %bb.7(0x7c000000)
+  ; CHECK:   JCC_1 %bb.8, 5, implicit $eflags
+  ; CHECK: bb.6:
+  ; CHECK:   successors: %bb.1(0x04000000), %bb.5(0x7c000000)
   ; CHECK:   $rax = MOV64rm $r14, 1, $noreg, 0, $noreg :: (load 8)
   ; CHECK:   TEST64rr $rax, $rax, implicit-def $eflags
-  ; CHECK:   JCC_1 %bb.7, 5, implicit $eflags
-  ; CHECK: bb.9:
+  ; CHECK:   JCC_1 %bb.5, 5, implicit $eflags
+  ; CHECK: bb.1:
   ; CHECK:   $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
   ; CHECK:   RETQ $eax
-  ; CHECK: bb.10:
+  ; CHECK: bb.8:
   ; CHECK:   $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
   ; CHECK:   dead $eax = XOR32rr undef $eax, undef $eax, implicit-def dead $eflags, implicit-def $al
   ; CHECK:   RETQ $eax
-  ; CHECK: bb.3:
-  ; CHECK:   $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
-  ; CHECK:   RETQ $eax
-  ; CHECK: bb.6:
-  ; CHECK:   $ebp = XOR32rr undef $ebp, undef $ebp, implicit-def dead $eflags
-  ; CHECK:   RETQ $eax
   bb.0:
     successors: %bb.1(0x40000000), %bb.7(0x40000000)
 

Modified: llvm/trunk/test/CodeGen/X86/tail-opts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tail-opts.ll?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tail-opts.ll (original)
+++ llvm/trunk/test/CodeGen/X86/tail-opts.ll Thu Jun 13 11:11:32 2019
@@ -571,35 +571,32 @@ return:
 ; two_nosize - Same as two, but without the optsize attribute.
 ; Now two instructions are enough to be tail-duplicated.
 
-define void @two_nosize() nounwind {
+define void @two_nosize(i32 %x, i32 %y, i32 %z) nounwind {
 ; CHECK-LABEL: two_nosize:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    jne .LBB8_3
+; CHECK-NEXT:    testl %edi, %edi
+; CHECK-NEXT:    je .LBB8_3
 ; CHECK-NEXT:  # %bb.1: # %bby
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    jne .LBB8_4
+; CHECK-NEXT:    testl %esi, %esi
+; CHECK-NEXT:    je .LBB8_4
 ; CHECK-NEXT:  # %bb.2: # %bb7
 ; CHECK-NEXT:    movl $0, {{.*}}(%rip)
 ; CHECK-NEXT:    jmp tail_call_me # TAILCALL
 ; CHECK-NEXT:  .LBB8_3: # %bbx
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    testb %al, %al
-; CHECK-NEXT:    je .LBB8_5
-; CHECK-NEXT:  .LBB8_4: # %return
-; CHECK-NEXT:    retq
-; CHECK-NEXT:  .LBB8_5: # %bb12
+; CHECK-NEXT:    cmpl $-1, %edx
+; CHECK-NEXT:    je .LBB8_4
+; CHECK-NEXT:  # %bb.5: # %bb12
 ; CHECK-NEXT:    movl $0, {{.*}}(%rip)
 ; CHECK-NEXT:    jmp tail_call_me # TAILCALL
+; CHECK-NEXT:  .LBB8_4: # %return
+; CHECK-NEXT:    retq
 entry:
-  %0 = icmp eq i32 undef, 0
+  %0 = icmp eq i32 %x, 0
   br i1 %0, label %bbx, label %bby
 
 bby:
-  switch i32 undef, label %bb7 [
-    i32 16, label %return
+  switch i32 %y, label %bb7 [
+    i32 0, label %return
   ]
 
 bb7:
@@ -608,8 +605,8 @@ bb7:
   ret void
 
 bbx:
-  switch i32 undef, label %bb12 [
-    i32 128, label %return
+  switch i32 %z, label %bb12 [
+    i32 -1, label %return
   ]
 
 bb12:

Modified: llvm/trunk/test/CodeGen/X86/tail-threshold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tail-threshold.ll?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tail-threshold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/tail-threshold.ll Thu Jun 13 11:11:32 2019
@@ -1,17 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-pc-linux-gnu -tail-merge-threshold 2 < %s | FileCheck %s
 
 ; Test that we still do some merging if a block has more than
 ; tail-merge-threshold predecessors.
 
-; CHECK: 	callq	bar
-; CHECK:	callq	bar
-; CHECK:	callq	bar
-; CHECK-NOT:    callq
-
 declare void @bar()
 
-define void @foo(i32 %xxx) {
-entry:
+define void @foo(i32 %xxx) nounwind {
+; CHECK-LABEL: foo:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    cmpl $3, %edi
+; CHECK-NEXT:    ja .LBB0_4
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    jmpq *.LJTI0_0(,%rax,8)
+; CHECK-NEXT:  .LBB0_3: # %bb3
+; CHECK-NEXT:    callq bar
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    retq
+; CHECK-NEXT:  .LBB0_4: # %bb4
+; CHECK-NEXT:    callq bar
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    retq
   switch i32 %xxx, label %bb4 [
     i32 0, label %bb0
     i32 1, label %bb1

Modified: llvm/trunk/test/CodeGen/X86/test-shrink-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/test-shrink-bug.ll?rev=363284&r1=363283&r2=363284&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/test-shrink-bug.ll (original)
+++ llvm/trunk/test/CodeGen/X86/test-shrink-bug.ll Thu Jun 13 11:11:32 2019
@@ -52,15 +52,13 @@ define void @fail(i16 %a, <2 x i8> %b) {
 ; CHECK-X86-NEXT:    cmpb $123, {{[0-9]+}}(%esp)
 ; CHECK-X86-NEXT:    sete %al
 ; CHECK-X86-NEXT:    testl $263, %ecx ## imm = 0x107
-; CHECK-X86-NEXT:    je LBB1_2
+; CHECK-X86-NEXT:    je LBB1_3
 ; CHECK-X86-NEXT:  ## %bb.1:
 ; CHECK-X86-NEXT:    testb %al, %al
-; CHECK-X86-NEXT:    jne LBB1_2
-; CHECK-X86-NEXT:  ## %bb.3: ## %no
+; CHECK-X86-NEXT:    jne LBB1_3
+; CHECK-X86-NEXT:  ## %bb.2: ## %no
 ; CHECK-X86-NEXT:    calll _bar
-; CHECK-X86-NEXT:    addl $12, %esp
-; CHECK-X86-NEXT:    retl
-; CHECK-X86-NEXT:  LBB1_2: ## %yes
+; CHECK-X86-NEXT:  LBB1_3: ## %yes
 ; CHECK-X86-NEXT:    addl $12, %esp
 ; CHECK-X86-NEXT:    retl
 ;
@@ -69,7 +67,7 @@ define void @fail(i16 %a, <2 x i8> %b) {
 ; CHECK-X64-NEXT:    pushq %rax
 ; CHECK-X64-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-X64-NEXT:    testl $263, %edi # imm = 0x107
-; CHECK-X64-NEXT:    je .LBB1_2
+; CHECK-X64-NEXT:    je .LBB1_3
 ; CHECK-X64-NEXT:  # %bb.1:
 ; CHECK-X64-NEXT:    pand {{.*}}(%rip), %xmm0
 ; CHECK-X64-NEXT:    pcmpeqd {{.*}}(%rip), %xmm0
@@ -77,14 +75,10 @@ define void @fail(i16 %a, <2 x i8> %b) {
 ; CHECK-X64-NEXT:    pand %xmm0, %xmm1
 ; CHECK-X64-NEXT:    pextrw $4, %xmm1, %eax
 ; CHECK-X64-NEXT:    testb $1, %al
-; CHECK-X64-NEXT:    jne .LBB1_2
-; CHECK-X64-NEXT:  # %bb.3: # %no
+; CHECK-X64-NEXT:    jne .LBB1_3
+; CHECK-X64-NEXT:  # %bb.2: # %no
 ; CHECK-X64-NEXT:    callq bar
-; CHECK-X64-NEXT:    popq %rax
-; CHECK-X64-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-X64-NEXT:    retq
-; CHECK-X64-NEXT:  .LBB1_2: # %yes
-; CHECK-X64-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-X64-NEXT:  .LBB1_3: # %yes
 ; CHECK-X64-NEXT:    popq %rax
 ; CHECK-X64-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-X64-NEXT:    retq



