[llvm] [MachineLICM] Let targets decide if copy-like instructions are cheap (PR #146599)

Guy David via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 1 13:38:13 PDT 2025


https://github.com/guy-david created https://github.com/llvm/llvm-project/pull/146599

When checking whether it is profitable to hoist an instruction, the pass may override a target's ruling because it assumes that all COPY instructions are cheap, which may not hold for every micro-architecture (especially when copying between different register classes).

On AArch64 there is no measurable performance difference in LLVM's test-suite with this change. Additionally, very few tests were affected, which shows how little value the special case provided.
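
For context, the hook the pass now defers to is TargetInstrInfo::isAsCheapAsAMove, which a target can override to keep treating some copies as cheap. Below is a minimal sketch of such an override, assuming a hypothetical MyTargetInstrInfo class; it treats only same-register-class virtual copies as move-cheap:

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"
  #include "llvm/CodeGen/TargetInstrInfo.h"

  using namespace llvm;

  // Hypothetical target hook: with this patch, MachineLICM no longer
  // assumes every COPY is cheap, so a target whose same-class copies
  // really are cheap can opt back in here.
  bool MyTargetInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
    if (MI.isCopy()) {
      const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
      Register Dst = MI.getOperand(0).getReg();
      Register Src = MI.getOperand(1).getReg();
      // Cross-class copies (e.g. GPR <-> FPR) can be expensive on some
      // micro-architectures, so only same-class virtual copies qualify.
      if (Dst.isVirtual() && Src.isVirtual())
        return MRI.getRegClass(Dst) == MRI.getRegClass(Src);
      return false;
    }
    return TargetInstrInfo::isAsCheapAsAMove(MI);
  }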

From e38181c96ff9b9c6a4fe5d93a5ad0616e292c61c Mon Sep 17 00:00:00 2001
From: Guy David <guyda96 at gmail.com>
Date: Tue, 1 Jul 2025 14:29:54 +0300
Subject: [PATCH] [MachineLICM] Let targets decide if copy-like instructions
 are cheap

When checking whether it is profitable to hoist an instruction, the pass
may override a target's ruling because it assumes that all COPY
instructions are cheap, and that may not be the case for all
micro-architectures.

On AArch64 there is no measurable performance difference in LLVM's
test-suite. Additionally, very few tests were affected by this change,
which shows how little value the special case provided.
---
 llvm/lib/CodeGen/MachineLICM.cpp              |   2 +-
 .../AArch64/dag-combine-concat-vectors.ll     |  42 +--
 llvm/test/CodeGen/PowerPC/vsx-fma-m-early.ll  | 258 ++++++++++--------
 llvm/test/CodeGen/X86/break-false-dep.ll      |  44 +--
 .../CodeGen/X86/dag-update-nodetomatch.ll     |  78 +++---
 llvm/test/CodeGen/X86/memfold-mov32r0.ll      |   9 -
 llvm/test/CodeGen/X86/memfold-mov32r0.mir     | 143 ++++++++++
 llvm/test/CodeGen/X86/pr57673.ll              |  36 +--
 llvm/test/CodeGen/X86/reverse_branches.ll     |  10 +-
 9 files changed, 387 insertions(+), 235 deletions(-)
 delete mode 100644 llvm/test/CodeGen/X86/memfold-mov32r0.ll
 create mode 100644 llvm/test/CodeGen/X86/memfold-mov32r0.mir

diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp
index c9079170ca575..70a178f642fb0 100644
--- a/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/llvm/lib/CodeGen/MachineLICM.cpp
@@ -1219,7 +1219,7 @@ bool MachineLICMImpl::HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
 /// Return true if the instruction is marked "cheap" or the operand latency
 /// between its def and a use is one or less.
 bool MachineLICMImpl::IsCheapInstruction(MachineInstr &MI) const {
-  if (TII->isAsCheapAsAMove(MI) || MI.isCopyLike())
+  if (TII->isAsCheapAsAMove(MI))
     return true;
 
   bool isCheap = false;
diff --git a/llvm/test/CodeGen/AArch64/dag-combine-concat-vectors.ll b/llvm/test/CodeGen/AArch64/dag-combine-concat-vectors.ll
index 53126a08db86f..2bd04ac30509e 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-concat-vectors.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-concat-vectors.ll
@@ -18,14 +18,28 @@ define fastcc i8 @allocno_reload_assign(ptr %p) {
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    movi v0.2d, #0000000000000000
 ; CHECK-NEXT:    mvn w8, w8
+; CHECK-NEXT:    uunpklo z1.h, z0.b
+; CHECK-NEXT:    uunpkhi z2.h, z0.b
 ; CHECK-NEXT:    sbfx x8, x8, #0, #1
 ; CHECK-NEXT:    whilelo p0.b, xzr, x8
+; CHECK-NEXT:    uunpklo z3.s, z1.h
+; CHECK-NEXT:    uunpkhi z4.s, z1.h
+; CHECK-NEXT:    uunpklo z6.s, z2.h
+; CHECK-NEXT:    uunpkhi z16.s, z2.h
 ; CHECK-NEXT:    punpklo p1.h, p0.b
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
 ; CHECK-NEXT:    punpklo p2.h, p1.b
 ; CHECK-NEXT:    punpkhi p4.h, p1.b
+; CHECK-NEXT:    uunpklo z1.d, z3.s
+; CHECK-NEXT:    uunpkhi z2.d, z3.s
 ; CHECK-NEXT:    punpklo p6.h, p0.b
+; CHECK-NEXT:    uunpklo z3.d, z4.s
+; CHECK-NEXT:    uunpkhi z4.d, z4.s
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
+; CHECK-NEXT:    uunpklo z5.d, z6.s
+; CHECK-NEXT:    uunpkhi z6.d, z6.s
+; CHECK-NEXT:    uunpklo z7.d, z16.s
+; CHECK-NEXT:    uunpkhi z16.d, z16.s
 ; CHECK-NEXT:    punpklo p1.h, p2.b
 ; CHECK-NEXT:    punpkhi p2.h, p2.b
 ; CHECK-NEXT:    punpklo p3.h, p4.b
@@ -35,28 +49,14 @@ define fastcc i8 @allocno_reload_assign(ptr %p) {
 ; CHECK-NEXT:    punpklo p7.h, p0.b
 ; CHECK-NEXT:    punpkhi p0.h, p0.b
 ; CHECK-NEXT:  .LBB0_1: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    uunpklo z1.h, z0.b
-; CHECK-NEXT:    uunpklo z2.s, z1.h
-; CHECK-NEXT:    uunpkhi z1.s, z1.h
-; CHECK-NEXT:    uunpklo z3.d, z2.s
-; CHECK-NEXT:    uunpkhi z2.d, z2.s
-; CHECK-NEXT:    st1b { z3.d }, p1, [z0.d]
+; CHECK-NEXT:    st1b { z1.d }, p1, [z0.d]
 ; CHECK-NEXT:    st1b { z2.d }, p2, [z0.d]
-; CHECK-NEXT:    uunpklo z2.d, z1.s
-; CHECK-NEXT:    uunpkhi z1.d, z1.s
-; CHECK-NEXT:    st1b { z2.d }, p3, [z0.d]
-; CHECK-NEXT:    uunpkhi z2.h, z0.b
-; CHECK-NEXT:    uunpklo z3.s, z2.h
-; CHECK-NEXT:    uunpkhi z2.s, z2.h
-; CHECK-NEXT:    st1b { z1.d }, p4, [z0.d]
-; CHECK-NEXT:    uunpklo z1.d, z3.s
-; CHECK-NEXT:    st1b { z1.d }, p5, [z0.d]
-; CHECK-NEXT:    uunpkhi z1.d, z3.s
-; CHECK-NEXT:    st1b { z1.d }, p6, [z0.d]
-; CHECK-NEXT:    uunpklo z1.d, z2.s
-; CHECK-NEXT:    st1b { z1.d }, p7, [z0.d]
-; CHECK-NEXT:    uunpkhi z1.d, z2.s
-; CHECK-NEXT:    st1b { z1.d }, p0, [z0.d]
+; CHECK-NEXT:    st1b { z3.d }, p3, [z0.d]
+; CHECK-NEXT:    st1b { z4.d }, p4, [z0.d]
+; CHECK-NEXT:    st1b { z5.d }, p5, [z0.d]
+; CHECK-NEXT:    st1b { z6.d }, p6, [z0.d]
+; CHECK-NEXT:    st1b { z7.d }, p7, [z0.d]
+; CHECK-NEXT:    st1b { z16.d }, p0, [z0.d]
 ; CHECK-NEXT:    str p8, [x0]
 ; CHECK-NEXT:    b .LBB0_1
   br label %1
diff --git a/llvm/test/CodeGen/PowerPC/vsx-fma-m-early.ll b/llvm/test/CodeGen/PowerPC/vsx-fma-m-early.ll
index 9cb2d4444b974..6cfb8b0e73f7c 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-fma-m-early.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-fma-m-early.ll
@@ -1,17 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ;; Tests that the ppc-vsx-fma-mutate pass with the schedule-ppc-vsx-fma-mutation-early pass does not hoist xxspltiw out of loops.
 ; RUN: llc -verify-machineinstrs -mcpu=pwr10 -disable-ppc-vsx-fma-mutation=false \
 ; RUN:   -ppc-asm-full-reg-names -schedule-ppc-vsx-fma-mutation-early \
-; RUN:    -mtriple powerpc64-ibm-aix < %s | FileCheck --check-prefixes=CHECK64,AIX64 %s
+; RUN:    -mtriple powerpc64-ibm-aix < %s | FileCheck --check-prefixes=AIX64 %s
 
 ; RUN: llc -verify-machineinstrs -mcpu=pwr10 -disable-ppc-vsx-fma-mutation=false \
 ; RUN:   -ppc-asm-full-reg-names -schedule-ppc-vsx-fma-mutation-early \
-; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck --check-prefixes=CHECK64,LINUX64 %s
+; RUN:   -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck --check-prefixes=LINUX64 %s
 
 ; RUN: llc -verify-machineinstrs -mcpu=pwr10 -disable-ppc-vsx-fma-mutation=false \
 ; RUN:   -ppc-asm-full-reg-names -schedule-ppc-vsx-fma-mutation-early \
 ; RUN:    -mtriple powerpc-ibm-aix < %s | FileCheck --check-prefix=CHECK32 %s
 
 define void @bar(ptr noalias nocapture noundef writeonly %__output_a, ptr noalias nocapture noundef readonly %var1321In_a, ptr noalias nocapture noundef readonly %n) {
+; AIX64-LABEL: bar:
+; AIX64:       # %bb.0: # %entry
+; AIX64-NEXT:    lwz r5, 0(r5)
+; AIX64-NEXT:    cmpwi r5, 1
+; AIX64-NEXT:    bltlr cr0
+; AIX64-NEXT:  # %bb.1: # %for.body.preheader
+; AIX64-NEXT:    xxspltiw vs0, 1069066811
+; AIX64-NEXT:    xxspltiw vs1, 1170469888
+; AIX64-NEXT:    mtctr r5
+; AIX64-NEXT:    li r5, 0
+; AIX64-NEXT:    .align 5
+; AIX64-NEXT:  L..BB0_2: # %for.body
+; AIX64-NEXT:    #
+; AIX64-NEXT:    lxvx vs2, r4, r5
+; AIX64-NEXT:    xvmaddmsp vs2, vs0, vs1
+; AIX64-NEXT:    stxvx vs2, r3, r5
+; AIX64-NEXT:    addi r5, r5, 16
+; AIX64-NEXT:    bdnz L..BB0_2
+; AIX64-NEXT:  # %bb.3: # %for.end
+; AIX64-NEXT:    blr
+;
+; LINUX64-LABEL: bar:
+; LINUX64:       # %bb.0: # %entry
+; LINUX64-NEXT:    lwz r5, 0(r5)
+; LINUX64-NEXT:    cmpwi r5, 1
+; LINUX64-NEXT:    bltlr cr0
+; LINUX64-NEXT:  # %bb.1: # %for.body.preheader
+; LINUX64-NEXT:    xxspltiw vs0, 1069066811
+; LINUX64-NEXT:    xxspltiw vs1, 1170469888
+; LINUX64-NEXT:    mtctr r5
+; LINUX64-NEXT:    li r5, 0
+; LINUX64-NEXT:    .p2align 5
+; LINUX64-NEXT:  .LBB0_2: # %for.body
+; LINUX64-NEXT:    #
+; LINUX64-NEXT:    lxvx vs2, r4, r5
+; LINUX64-NEXT:    xvmaddmsp vs2, vs0, vs1
+; LINUX64-NEXT:    stxvx vs2, r3, r5
+; LINUX64-NEXT:    addi r5, r5, 16
+; LINUX64-NEXT:    bdnz .LBB0_2
+; LINUX64-NEXT:  # %bb.3: # %for.end
+; LINUX64-NEXT:    blr
+;
+; CHECK32-LABEL: bar:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    lwz r5, 0(r5)
+; CHECK32-NEXT:    cmpwi r5, 0
+; CHECK32-NEXT:    blelr cr0
+; CHECK32-NEXT:  # %bb.1: # %for.body.preheader
+; CHECK32-NEXT:    xxspltiw vs0, 1069066811
+; CHECK32-NEXT:    xxspltiw vs1, 1170469888
+; CHECK32-NEXT:    li r6, 0
+; CHECK32-NEXT:    li r7, 0
+; CHECK32-NEXT:    .align 4
+; CHECK32-NEXT:  L..BB0_2: # %for.body
+; CHECK32-NEXT:    #
+; CHECK32-NEXT:    slwi r8, r7, 4
+; CHECK32-NEXT:    addic r7, r7, 1
+; CHECK32-NEXT:    addze r6, r6
+; CHECK32-NEXT:    lxvx vs2, r4, r8
+; CHECK32-NEXT:    xvmaddmsp vs2, vs0, vs1
+; CHECK32-NEXT:    stxvx vs2, r3, r8
+; CHECK32-NEXT:    xor r8, r7, r5
+; CHECK32-NEXT:    or. r8, r8, r6
+; CHECK32-NEXT:    bne cr0, L..BB0_2
+; CHECK32-NEXT:  # %bb.3: # %for.end
+; CHECK32-NEXT:    blr
 entry:
   %0 = load i32, ptr %n, align 4
   %cmp11 = icmp sgt i32 %0, 0
@@ -28,7 +95,7 @@ for.body:
   %add.ptr.val = load <4 x float>, ptr %add.ptr, align 1
   %2 = tail call contract <4 x float> @llvm.fma.v4f32(<4 x float> %add.ptr.val, <4 x float> <float 0x3FF7154760000000, float 0x3FF7154760000000, float 0x3FF7154760000000, float 0x3FF7154760000000>, <4 x float> <float 6.270500e+03, float 6.270500e+03, float 6.270500e+03, float 6.270500e+03>)
   %add.ptr6 = getelementptr inbounds float, ptr %__output_a, i64 %1
-  store <4 x float> %2, ptr %add.ptr6, align 1 
+  store <4 x float> %2, ptr %add.ptr6, align 1
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
   br i1 %exitcond.not, label %for.end, label %for.body
@@ -38,6 +105,74 @@ for.end:
 }
 
 define void @foo(i1 %cmp97) #0 {
+; AIX64-LABEL: foo:
+; AIX64:       # %bb.0: # %entry
+; AIX64-NEXT:    andi. r3, r3, 1
+; AIX64-NEXT:    bclr 4, gt, 0
+; AIX64-NEXT:  # %bb.1: # %for.body.preheader
+; AIX64-NEXT:    xxlxor f0, f0, f0
+; AIX64-NEXT:    xxlxor f2, f2, f2
+; AIX64-NEXT:    xxmrghd vs1, vs0, vs0
+; AIX64-NEXT:    xvcvdpsp vs34, vs1
+; AIX64-NEXT:    xxlxor vs1, vs1, vs1
+; AIX64-NEXT:    .align 4
+; AIX64-NEXT:  L..BB1_2: # %for.body
+; AIX64-NEXT:    #
+; AIX64-NEXT:    xxmrghd vs2, vs2, vs0
+; AIX64-NEXT:    xvcvdpsp vs35, vs2
+; AIX64-NEXT:    xxspltiw vs2, 1170469888
+; AIX64-NEXT:    vmrgew v3, v3, v2
+; AIX64-NEXT:    xvcmpgtsp vs3, vs1, vs35
+; AIX64-NEXT:    xvmaddasp vs2, vs35, vs1
+; AIX64-NEXT:    xxland vs2, vs3, vs2
+; AIX64-NEXT:    xscvspdpn f2, vs2
+; AIX64-NEXT:    b L..BB1_2
+;
+; LINUX64-LABEL: foo:
+; LINUX64:       # %bb.0: # %entry
+; LINUX64-NEXT:    andi. r3, r3, 1
+; LINUX64-NEXT:    bclr 4, gt, 0
+; LINUX64-NEXT:  # %bb.1: # %for.body.preheader
+; LINUX64-NEXT:    xxlxor f0, f0, f0
+; LINUX64-NEXT:    xxlxor f2, f2, f2
+; LINUX64-NEXT:    xxspltd vs1, vs0, 0
+; LINUX64-NEXT:    xvcvdpsp vs34, vs1
+; LINUX64-NEXT:    xxlxor vs1, vs1, vs1
+; LINUX64-NEXT:    .p2align 4
+; LINUX64-NEXT:  .LBB1_2: # %for.body
+; LINUX64-NEXT:    #
+; LINUX64-NEXT:    xxmrghd vs2, vs0, vs2
+; LINUX64-NEXT:    xvcvdpsp vs35, vs2
+; LINUX64-NEXT:    xxspltiw vs2, 1170469888
+; LINUX64-NEXT:    vmrgew v3, v2, v3
+; LINUX64-NEXT:    xvcmpgtsp vs3, vs1, vs35
+; LINUX64-NEXT:    xvmaddasp vs2, vs35, vs1
+; LINUX64-NEXT:    xxland vs2, vs3, vs2
+; LINUX64-NEXT:    xxsldwi vs2, vs2, vs2, 3
+; LINUX64-NEXT:    xscvspdpn f2, vs2
+; LINUX64-NEXT:    b .LBB1_2
+;
+; CHECK32-LABEL: foo:
+; CHECK32:       # %bb.0: # %entry
+; CHECK32-NEXT:    andi. r3, r3, 1
+; CHECK32-NEXT:    bclr 4, gt, 0
+; CHECK32-NEXT:  # %bb.1: # %for.body.preheader
+; CHECK32-NEXT:    lwz r3, L..C0(r2) # %const.0
+; CHECK32-NEXT:    xxlxor f1, f1, f1
+; CHECK32-NEXT:    xxlxor vs0, vs0, vs0
+; CHECK32-NEXT:    xscvdpspn vs35, f1
+; CHECK32-NEXT:    lxv vs34, 0(r3)
+; CHECK32-NEXT:    .align 4
+; CHECK32-NEXT:  L..BB1_2: # %for.body
+; CHECK32-NEXT:    #
+; CHECK32-NEXT:    xscvdpspn vs36, f1
+; CHECK32-NEXT:    xxspltiw vs1, 1170469888
+; CHECK32-NEXT:    vperm v4, v4, v3, v2
+; CHECK32-NEXT:    xvcmpgtsp vs2, vs0, vs36
+; CHECK32-NEXT:    xvmaddasp vs1, vs36, vs0
+; CHECK32-NEXT:    xxland vs1, vs2, vs1
+; CHECK32-NEXT:    xscvspdpn f1, vs1
+; CHECK32-NEXT:    b L..BB1_2
 entry:
   br i1 %cmp97, label %for.body, label %for.end
 
@@ -57,122 +192,7 @@ for.end:                                          ; preds = %entry
 }
 
 ; Function Attrs: mustprogress nocallback nofree nosync nounwind speculatable willreturn memory(none)
-declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) 
+declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
 
 ; Function Attrs: nocallback nofree nosync nounwind willreturn memory(none)
 declare <4 x i32> @llvm.ppc.vsx.xvcmpgtsp(<4 x float>, <4 x float>)
-
-; CHECK64:      bar:
-; CHECK64:      # %bb.0:                                # %entry
-; CHECK64-NEXT:         lwz r5, 0(r5)
-; CHECK64-NEXT:         cmpwi   r5, 1
-; CHECK64-NEXT:         bltlr   cr0
-; CHECK64-NEXT: # %bb.1:                                # %for.body.preheader
-; CHECK64-NEXT:         xxspltiw vs0, 1069066811
-; CHECK64-NEXT:         xxspltiw vs1, 1170469888
-; CHECK64-NEXT:         mtctr r5
-; CHECK64-NEXT:         li r5, 0
-; CHECK64-NEXT:         {{.*}}align  5
-; CHECK64-NEXT: [[L2_bar:.*]]:                               # %for.body
-; CHECK64-NEXT:                                         # =>This Inner Loop Header: Depth=1
-; CHECK64-NEXT:         lxvx vs2, r4, r5
-; CHECK64-NEXT:         xvmaddmsp vs2, vs0, vs1
-; CHECK64-NEXT:         stxvx vs2, r3, r5
-; CHECK64-NEXT:         addi r5, r5, 16
-; CHECK64-NEXT:         bdnz [[L2_bar]]
-; CHECK64-NEXT: # %bb.3:                                # %for.end
-; CHECK64-NEXT:         blr
-
-; AIX64:      .foo:
-; AIX64-NEXT: # %bb.0:                                # %entry
-; AIX64-NEXT:   andi. r3, r3, 1
-; AIX64-NEXT:   bclr 4, gt, 0
-; AIX64-NEXT: # %bb.1:                                # %for.body.preheader
-; AIX64-NEXT:   xxlxor f0, f0, f0
-; AIX64-NEXT:   xxlxor vs1, vs1, vs1
-; AIX64-NEXT:   xxlxor f2, f2, f2
-; AIX64-NEXT:   .align  4
-; AIX64-NEXT: L..BB1_2:                               # %for.body
-; AIX64-NEXT:                                         # =>This Inner Loop Header: Depth=1
-; AIX64-NEXT:   xxmrghd vs2, vs2, vs0
-; AIX64-NEXT:   xvcvdpsp vs34, vs2
-; AIX64-NEXT:   xxmrghd vs2, vs0, vs0
-; AIX64-NEXT:   xvcvdpsp vs35, vs2
-; AIX64-NEXT:   xxspltiw vs2, 1170469888
-; AIX64-NEXT:   vmrgew v2, v2, v3
-; AIX64-NEXT:   xvcmpgtsp vs3, vs1, vs34
-; AIX64-NEXT:   xvmaddasp vs2, vs34, vs1
-; AIX64-NEXT:   xxland vs2, vs3, vs2
-; AIX64-NEXT:   xscvspdpn f2, vs2
-; AIX64-NEXT:   b L..BB1_2
-
-; LINUX64:      foo:                                    # @foo
-; LINUX64-NEXT: .Lfunc_begin1:
-; LINUX64-NEXT:         .cfi_startproc
-; LINUX64-NEXT: # %bb.0:                                # %entry
-; LINUX64-NEXT:         andi. r3, r3, 1
-; LINUX64-NEXT:         bclr 4, gt, 0
-; LINUX64-NEXT: # %bb.1:                                # %for.body.preheader
-; LINUX64-NEXT:         xxlxor f0, f0, f0
-; LINUX64-NEXT:         xxlxor vs1, vs1, vs1
-; LINUX64-NEXT:         xxlxor f2, f2, f2
-; LINUX64-NEXT:         .p2align        4
-; LINUX64-NEXT: .LBB1_2:                                # %for.body
-; LINUX64-NEXT:                                         # =>This Inner Loop Header: Depth=1
-; LINUX64-NEXT:         xxmrghd vs2, vs0, vs2
-; LINUX64-NEXT:         xvcvdpsp vs34, vs2
-; LINUX64-NEXT:         xxspltd vs2, vs0, 0
-; LINUX64-NEXT:         xvcvdpsp vs35, vs2
-; LINUX64-NEXT:         xxspltiw vs2, 1170469888
-; LINUX64-NEXT:         vmrgew v2, v3, v2
-; LINUX64-NEXT:         xvcmpgtsp vs3, vs1, vs34
-; LINUX64-NEXT:         xvmaddasp vs2, vs34, vs1
-; LINUX64-NEXT:         xxland vs2, vs3, vs2
-; LINUX64-NEXT:         xxsldwi vs2, vs2, vs2, 3
-; LINUX64-NEXT:         xscvspdpn f2, vs2
-; LINUX64-NEXT:         b .LBB1_2
-
-; CHECK32:        .bar:
-; CHECK32-NEXT: # %bb.0:                                # %entry
-; CHECK32-NEXT:       lwz r5, 0(r5)
-; CHECK32-NEXT:       cmpwi   r5, 0
-; CHECK32-NEXT:       blelr cr0
-; CHECK32-NEXT: # %bb.1:                                # %for.body.preheader
-; CHECK32-NEXT:       xxspltiw vs0, 1069066811
-; CHECK32-NEXT:       xxspltiw vs1, 1170469888
-; CHECK32-NEXT:       li r6, 0
-; CHECK32-NEXT:       li r7, 0
-; CHECK32-NEXT:       .align  4
-; CHECK32-NEXT: [[L2_foo:.*]]:                               # %for.body
-; CHECK32-NEXT:                                         # =>This Inner Loop Header: Depth=1
-; CHECK32-NEXT:       slwi r8, r7, 4
-; CHECK32-NEXT:       addic r7, r7, 1
-; CHECK32-NEXT:       addze r6, r6
-; CHECK32-NEXT:       lxvx vs2, r4, r8
-; CHECK32-NEXT:       xvmaddmsp vs2, vs0, vs1
-; CHECK32-NEXT:       stxvx vs2, r3, r8
-; CHECK32-NEXT:       xor r8, r7, r5
-; CHECK32-NEXT:       or. r8, r8, r6
-; CHECK32-NEXT:       bne     cr0, [[L2_foo]]
-
-; CHECK32:      .foo:
-; CHECK32-NEXT: # %bb.0:                                # %entry
-; CHECK32-NEXT:         andi. r3, r3, 1
-; CHECK32-NEXT:         bclr 4, gt, 0
-; CHECK32-NEXT: # %bb.1:                                # %for.body.preheader
-; CHECK32-NEXT:         lwz r3, L..C0(r2)                       # %const.0
-; CHECK32-NEXT:         xxlxor f1, f1, f1
-; CHECK32-NEXT:         xxlxor vs0, vs0, vs0
-; CHECK32-NEXT:         xscvdpspn vs35, f1
-; CHECK32-NEXT:         lxv vs34, 0(r3)
-; CHECK32-NEXT:         .align  4
-; CHECK32-NEXT: L..BB1_2:                               # %for.body
-; CHECK32-NEXT:                                         # =>This Inner Loop Header: Depth=1
-; CHECK32-NEXT:         xscvdpspn vs36, f1
-; CHECK32-NEXT:         xxspltiw vs1, 1170469888
-; CHECK32-NEXT:         vperm v4, v4, v3, v2
-; CHECK32-NEXT:         xvcmpgtsp vs2, vs0, vs36
-; CHECK32-NEXT:         xvmaddasp vs1, vs36, vs0
-; CHECK32-NEXT:         xxland vs1, vs2, vs1
-; CHECK32-NEXT:         xscvspdpn f1, vs1
-; CHECK32-NEXT:         b L..BB1_2
diff --git a/llvm/test/CodeGen/X86/break-false-dep.ll b/llvm/test/CodeGen/X86/break-false-dep.ll
index 6943622fac7f2..a6ad3018e052c 100644
--- a/llvm/test/CodeGen/X86/break-false-dep.ll
+++ b/llvm/test/CodeGen/X86/break-false-dep.ll
@@ -472,17 +472,17 @@ define dso_local void @loopdep3() {
 ; SSE-WIN-NEXT:    movaps %xmm6, (%rsp) # 16-byte Spill
 ; SSE-WIN-NEXT:    .seh_savexmm %xmm6, 0
 ; SSE-WIN-NEXT:    .seh_endprologue
-; SSE-WIN-NEXT:    xorl %eax, %eax
-; SSE-WIN-NEXT:    leaq v(%rip), %rcx
-; SSE-WIN-NEXT:    leaq x(%rip), %rdx
-; SSE-WIN-NEXT:    leaq y(%rip), %r8
-; SSE-WIN-NEXT:    leaq z(%rip), %r9
-; SSE-WIN-NEXT:    leaq w(%rip), %r10
+; SSE-WIN-NEXT:    leaq v(%rip), %rax
+; SSE-WIN-NEXT:    leaq x(%rip), %rcx
+; SSE-WIN-NEXT:    leaq y(%rip), %rdx
+; SSE-WIN-NEXT:    leaq z(%rip), %r8
+; SSE-WIN-NEXT:    leaq w(%rip), %r9
+; SSE-WIN-NEXT:    xorl %r10d, %r10d
 ; SSE-WIN-NEXT:    .p2align 4
 ; SSE-WIN-NEXT:  .LBB8_1: # %for.cond1.preheader
 ; SSE-WIN-NEXT:    # =>This Loop Header: Depth=1
 ; SSE-WIN-NEXT:    # Child Loop BB8_2 Depth 2
-; SSE-WIN-NEXT:    movq %rcx, %r11
+; SSE-WIN-NEXT:    movq %rax, %r11
 ; SSE-WIN-NEXT:    xorl %esi, %esi
 ; SSE-WIN-NEXT:    .p2align 4
 ; SSE-WIN-NEXT:  .LBB8_2: # %for.body3
@@ -490,10 +490,10 @@ define dso_local void @loopdep3() {
 ; SSE-WIN-NEXT:    # => This Inner Loop Header: Depth=2
 ; SSE-WIN-NEXT:    xorps %xmm0, %xmm0
 ; SSE-WIN-NEXT:    cvtsi2sdl (%r11), %xmm0
+; SSE-WIN-NEXT:    mulsd (%rsi,%rcx), %xmm0
 ; SSE-WIN-NEXT:    mulsd (%rsi,%rdx), %xmm0
 ; SSE-WIN-NEXT:    mulsd (%rsi,%r8), %xmm0
-; SSE-WIN-NEXT:    mulsd (%rsi,%r9), %xmm0
-; SSE-WIN-NEXT:    movsd %xmm0, (%rsi,%r10)
+; SSE-WIN-NEXT:    movsd %xmm0, (%rsi,%r9)
 ; SSE-WIN-NEXT:    #APP
 ; SSE-WIN-NEXT:    #NO_APP
 ; SSE-WIN-NEXT:    addq $8, %rsi
@@ -502,8 +502,8 @@ define dso_local void @loopdep3() {
 ; SSE-WIN-NEXT:    jne .LBB8_2
 ; SSE-WIN-NEXT:  # %bb.3: # %for.inc14
 ; SSE-WIN-NEXT:    # in Loop: Header=BB8_1 Depth=1
-; SSE-WIN-NEXT:    incl %eax
-; SSE-WIN-NEXT:    cmpl $100000, %eax # imm = 0x186A0
+; SSE-WIN-NEXT:    incl %r10d
+; SSE-WIN-NEXT:    cmpl $100000, %r10d # imm = 0x186A0
 ; SSE-WIN-NEXT:    jne .LBB8_1
 ; SSE-WIN-NEXT:  # %bb.4: # %for.end16
 ; SSE-WIN-NEXT:    movaps (%rsp), %xmm6 # 16-byte Reload
@@ -550,17 +550,17 @@ define dso_local void @loopdep3() {
 ; AVX-NEXT:    vmovaps %xmm6, (%rsp) # 16-byte Spill
 ; AVX-NEXT:    .seh_savexmm %xmm6, 0
 ; AVX-NEXT:    .seh_endprologue
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    leaq v(%rip), %rcx
-; AVX-NEXT:    leaq x(%rip), %rdx
-; AVX-NEXT:    leaq y(%rip), %r8
-; AVX-NEXT:    leaq z(%rip), %r9
-; AVX-NEXT:    leaq w(%rip), %r10
+; AVX-NEXT:    leaq v(%rip), %rax
+; AVX-NEXT:    leaq x(%rip), %rcx
+; AVX-NEXT:    leaq y(%rip), %rdx
+; AVX-NEXT:    leaq z(%rip), %r8
+; AVX-NEXT:    leaq w(%rip), %r9
+; AVX-NEXT:    xorl %r10d, %r10d
 ; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB8_1: # %for.cond1.preheader
 ; AVX-NEXT:    # =>This Loop Header: Depth=1
 ; AVX-NEXT:    # Child Loop BB8_2 Depth 2
-; AVX-NEXT:    movq %rcx, %r11
+; AVX-NEXT:    movq %rax, %r11
 ; AVX-NEXT:    xorl %esi, %esi
 ; AVX-NEXT:    .p2align 4
 ; AVX-NEXT:  .LBB8_2: # %for.body3
@@ -568,10 +568,10 @@ define dso_local void @loopdep3() {
 ; AVX-NEXT:    # => This Inner Loop Header: Depth=2
 ; AVX-NEXT:    vxorps %xmm5, %xmm5, %xmm5
 ; AVX-NEXT:    vcvtsi2sdl (%r11), %xmm5, %xmm0
+; AVX-NEXT:    vmulsd (%rsi,%rcx), %xmm0, %xmm0
 ; AVX-NEXT:    vmulsd (%rsi,%rdx), %xmm0, %xmm0
 ; AVX-NEXT:    vmulsd (%rsi,%r8), %xmm0, %xmm0
-; AVX-NEXT:    vmulsd (%rsi,%r9), %xmm0, %xmm0
-; AVX-NEXT:    vmovsd %xmm0, (%rsi,%r10)
+; AVX-NEXT:    vmovsd %xmm0, (%rsi,%r9)
 ; AVX-NEXT:    #APP
 ; AVX-NEXT:    #NO_APP
 ; AVX-NEXT:    addq $8, %rsi
@@ -580,8 +580,8 @@ define dso_local void @loopdep3() {
 ; AVX-NEXT:    jne .LBB8_2
 ; AVX-NEXT:  # %bb.3: # %for.inc14
 ; AVX-NEXT:    # in Loop: Header=BB8_1 Depth=1
-; AVX-NEXT:    incl %eax
-; AVX-NEXT:    cmpl $100000, %eax # imm = 0x186A0
+; AVX-NEXT:    incl %r10d
+; AVX-NEXT:    cmpl $100000, %r10d # imm = 0x186A0
 ; AVX-NEXT:    jne .LBB8_1
 ; AVX-NEXT:  # %bb.4: # %for.end16
 ; AVX-NEXT:    vmovaps (%rsp), %xmm6 # 16-byte Reload
diff --git a/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll b/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
index bf6b09674e187..aab35c1fe4856 100644
--- a/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
+++ b/llvm/test/CodeGen/X86/dag-update-nodetomatch.ll
@@ -123,7 +123,7 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    movq x3 at GOTPCREL(%rip), %rcx
 ; CHECK-NEXT:    movl (%rcx), %ecx
 ; CHECK-NEXT:    testl %ecx, %ecx
-; CHECK-NEXT:    je .LBB1_18
+; CHECK-NEXT:    je .LBB1_17
 ; CHECK-NEXT:  # %bb.1: # %for.cond1thread-pre-split.lr.ph
 ; CHECK-NEXT:    movq x5 at GOTPCREL(%rip), %rdx
 ; CHECK-NEXT:    movq (%rdx), %rsi
@@ -140,27 +140,27 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
 ; CHECK-NEXT:    leaq 32(%rsi), %r11
 ; CHECK-NEXT:    leaq 8(,%rbx,8), %rbx
-; CHECK-NEXT:    xorl %r14d, %r14d
-; CHECK-NEXT:    movq x0 at GOTPCREL(%rip), %r15
-; CHECK-NEXT:    movq %rsi, %r12
+; CHECK-NEXT:    movq x0 at GOTPCREL(%rip), %r14
+; CHECK-NEXT:    movq %rsi, %r15
+; CHECK-NEXT:    xorl %r12d, %r12d
 ; CHECK-NEXT:    jmp .LBB1_2
 ; CHECK-NEXT:    .p2align 4
-; CHECK-NEXT:  .LBB1_15: # %for.cond1.for.inc3_crit_edge
+; CHECK-NEXT:  .LBB1_14: # %for.cond1.for.inc3_crit_edge
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    movl %edx, (%r8)
-; CHECK-NEXT:  .LBB1_16: # %for.inc3
+; CHECK-NEXT:  .LBB1_15: # %for.inc3
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    addq %rbx, %r12
-; CHECK-NEXT:    incq %r14
+; CHECK-NEXT:    addq %rbx, %r15
+; CHECK-NEXT:    incq %r12
 ; CHECK-NEXT:    addq %rbx, %r11
 ; CHECK-NEXT:    incl %ecx
-; CHECK-NEXT:    je .LBB1_17
+; CHECK-NEXT:    je .LBB1_16
 ; CHECK-NEXT:  .LBB1_2: # %for.cond1thread-pre-split
 ; CHECK-NEXT:    # =>This Loop Header: Depth=1
-; CHECK-NEXT:    # Child Loop BB1_12 Depth 2
-; CHECK-NEXT:    # Child Loop BB1_14 Depth 2
+; CHECK-NEXT:    # Child Loop BB1_11 Depth 2
+; CHECK-NEXT:    # Child Loop BB1_13 Depth 2
 ; CHECK-NEXT:    testl %edx, %edx
-; CHECK-NEXT:    jns .LBB1_16
+; CHECK-NEXT:    jns .LBB1_15
 ; CHECK-NEXT:  # %bb.3: # %for.body2.preheader
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    movslq %edx, %r13
@@ -170,49 +170,47 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    subq %r13, %rbp
 ; CHECK-NEXT:    incq %rbp
 ; CHECK-NEXT:    cmpq $4, %rbp
-; CHECK-NEXT:    jb .LBB1_14
+; CHECK-NEXT:    jb .LBB1_13
 ; CHECK-NEXT:  # %bb.4: # %min.iters.checked
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    movq %rbp, %rdx
 ; CHECK-NEXT:    andq $-4, %rdx
-; CHECK-NEXT:    je .LBB1_14
+; CHECK-NEXT:    je .LBB1_13
 ; CHECK-NEXT:  # %bb.5: # %vector.memcheck
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
-; CHECK-NEXT:    imulq %r14, %rax
+; CHECK-NEXT:    imulq %r12, %rax
 ; CHECK-NEXT:    leaq (%rsi,%rax), %r10
 ; CHECK-NEXT:    leaq (%r10,%r13,8), %r9
 ; CHECK-NEXT:    testq %r13, %r13
 ; CHECK-NEXT:    movq $-1, %r10
 ; CHECK-NEXT:    cmovnsq %r13, %r10
-; CHECK-NEXT:    cmpq %r15, %r9
+; CHECK-NEXT:    cmpq %r14, %r9
 ; CHECK-NEXT:    jae .LBB1_7
 ; CHECK-NEXT:  # %bb.6: # %vector.memcheck
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
 ; CHECK-NEXT:    leaq (%rax,%r10,8), %rax
-; CHECK-NEXT:    cmpq %r15, %rax
-; CHECK-NEXT:    ja .LBB1_14
+; CHECK-NEXT:    cmpq %r14, %rax
+; CHECK-NEXT:    ja .LBB1_13
 ; CHECK-NEXT:  .LBB1_7: # %vector.body.preheader
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    leaq -4(%rdx), %rax
 ; CHECK-NEXT:    btl $2, %eax
-; CHECK-NEXT:    jb .LBB1_8
-; CHECK-NEXT:  # %bb.9: # %vector.body.prol.preheader
+; CHECK-NEXT:    movl $0, %r10d
+; CHECK-NEXT:    jb .LBB1_9
+; CHECK-NEXT:  # %bb.8: # %vector.body.prol.preheader
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
-; CHECK-NEXT:    movdqu %xmm0, (%r12,%r13,8)
-; CHECK-NEXT:    movdqu %xmm0, 16(%r12,%r13,8)
+; CHECK-NEXT:    movdqu %xmm0, (%r15,%r13,8)
+; CHECK-NEXT:    movdqu %xmm0, 16(%r15,%r13,8)
 ; CHECK-NEXT:    movl $4, %r10d
+; CHECK-NEXT:  .LBB1_9: # %vector.body.prol.loopexit.unr-lcssa
+; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    shrq $2, %rax
-; CHECK-NEXT:    jne .LBB1_11
-; CHECK-NEXT:    jmp .LBB1_13
-; CHECK-NEXT:  .LBB1_8: # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT:    xorl %r10d, %r10d
-; CHECK-NEXT:    shrq $2, %rax
-; CHECK-NEXT:    je .LBB1_13
-; CHECK-NEXT:  .LBB1_11: # %vector.body.preheader.new
+; CHECK-NEXT:    je .LBB1_12
+; CHECK-NEXT:  # %bb.10: # %vector.body.preheader.new
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
@@ -221,7 +219,7 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    addq %r13, %r10
 ; CHECK-NEXT:    leaq (%r11,%r10,8), %r10
 ; CHECK-NEXT:    .p2align 4
-; CHECK-NEXT:  .LBB1_12: # %vector.body
+; CHECK-NEXT:  .LBB1_11: # %vector.body
 ; CHECK-NEXT:    # Parent Loop BB1_2 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
 ; CHECK-NEXT:    movdqu %xmm0, -32(%r10)
@@ -230,30 +228,30 @@ define void @_Z2x6v() local_unnamed_addr {
 ; CHECK-NEXT:    movdqu %xmm0, 16(%r10)
 ; CHECK-NEXT:    addq $64, %r10
 ; CHECK-NEXT:    addq $8, %rax
-; CHECK-NEXT:    jne .LBB1_12
-; CHECK-NEXT:  .LBB1_13: # %middle.block
+; CHECK-NEXT:    jne .LBB1_11
+; CHECK-NEXT:  .LBB1_12: # %middle.block
 ; CHECK-NEXT:    # in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    addq %rdx, %r13
 ; CHECK-NEXT:    cmpq %rdx, %rbp
 ; CHECK-NEXT:    movq %r13, %rdx
-; CHECK-NEXT:    je .LBB1_15
+; CHECK-NEXT:    je .LBB1_14
 ; CHECK-NEXT:    .p2align 4
-; CHECK-NEXT:  .LBB1_14: # %for.body2
+; CHECK-NEXT:  .LBB1_13: # %for.body2
 ; CHECK-NEXT:    # Parent Loop BB1_2 Depth=1
 ; CHECK-NEXT:    # => This Inner Loop Header: Depth=2
-; CHECK-NEXT:    movq (%r15), %rax
-; CHECK-NEXT:    movq %rax, (%r12,%r13,8)
+; CHECK-NEXT:    movq (%r14), %rax
+; CHECK-NEXT:    movq %rax, (%r15,%r13,8)
 ; CHECK-NEXT:    leaq 1(%r13), %rdx
 ; CHECK-NEXT:    cmpq $-1, %r13
 ; CHECK-NEXT:    movq %rdx, %r13
-; CHECK-NEXT:    jl .LBB1_14
-; CHECK-NEXT:    jmp .LBB1_15
-; CHECK-NEXT:  .LBB1_17: # %for.cond.for.end5_crit_edge
+; CHECK-NEXT:    jl .LBB1_13
+; CHECK-NEXT:    jmp .LBB1_14
+; CHECK-NEXT:  .LBB1_16: # %for.cond.for.end5_crit_edge
 ; CHECK-NEXT:    movq x5 at GOTPCREL(%rip), %rax
 ; CHECK-NEXT:    movq %rdi, (%rax)
 ; CHECK-NEXT:    movq x3 at GOTPCREL(%rip), %rax
 ; CHECK-NEXT:    movl $0, (%rax)
-; CHECK-NEXT:  .LBB1_18: # %for.end5
+; CHECK-NEXT:  .LBB1_17: # %for.end5
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 48
 ; CHECK-NEXT:    popq %r12
diff --git a/llvm/test/CodeGen/X86/memfold-mov32r0.ll b/llvm/test/CodeGen/X86/memfold-mov32r0.ll
deleted file mode 100644
index f7cbf6c33c94c..0000000000000
--- a/llvm/test/CodeGen/X86/memfold-mov32r0.ll
+++ /dev/null
@@ -1,9 +0,0 @@
-; RUN: llc < %s -mtriple=x86_64 | FileCheck %s
-
-; CHECK:    movq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-define i32 @test() nounwind {
-entry:
-  %div = udiv i256 0, 0
-  store i256 %div, ptr null, align 16
-  ret i32 0
-}
diff --git a/llvm/test/CodeGen/X86/memfold-mov32r0.mir b/llvm/test/CodeGen/X86/memfold-mov32r0.mir
new file mode 100644
index 0000000000000..729b8098a0266
--- /dev/null
+++ b/llvm/test/CodeGen/X86/memfold-mov32r0.mir
@@ -0,0 +1,143 @@
+# RUN: llc -start-after=early-machinelicm -mtriple=x86_64 %s -o - | FileCheck %s
+
+---
+name: test
+tracksRegLiveness: true
+isSSA: true
+body:             |
+  ; CHECK:    movq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+  bb.0:
+    successors: %bb.5(0x40000000), %bb.4(0x40000000)
+  
+    %80:gr32 = MOV32r0 implicit-def dead $eflags
+    %15:gr64 = SUBREG_TO_REG 0, killed %80, %subreg.sub_32bit
+    %14:gr64 = COPY %15
+    %13:gr64 = COPY %15
+    %12:gr64 = COPY %15
+    %81:gr8 = MOV8ri 1
+    TEST8rr %81, %81, implicit-def $eflags
+    JCC_1 %bb.5, 5, implicit $eflags
+    JMP_1 %bb.4
+  
+  bb.1:
+    successors: %bb.5(0x80000000)
+  
+    %16:gr64 = PHI %82, %bb.4, %48, %bb.2
+    %17:gr64 = PHI %83, %bb.4, %49, %bb.2
+    %18:gr64 = PHI %84, %bb.4, %50, %bb.2
+    %19:gr64 = PHI %85, %bb.4, %51, %bb.2
+    %20:gr64 = PHI %72, %bb.4, %44, %bb.2
+    %21:gr64 = PHI %73, %bb.4, %45, %bb.2
+    %22:gr64 = PHI %74, %bb.4, %46, %bb.2
+    %23:gr64 = PHI %75, %bb.4, %47, %bb.2
+    %105:gr64 = SHLD64rri8 %23, %22, 1, implicit-def dead $eflags
+    %106:gr64 = SHLD64rri8 %22, %21, 1, implicit-def dead $eflags
+    %107:gr64 = SHLD64rri8 %21, %20, 1, implicit-def dead $eflags
+    %108:gr64 = ADD64rr %20, %20, implicit-def dead $eflags
+    %24:gr64 = OR64rr %16, killed %108, implicit-def dead $eflags
+    %25:gr64 = OR64rr %17, killed %107, implicit-def dead $eflags
+    %26:gr64 = OR64rr %18, killed %106, implicit-def dead $eflags
+    %27:gr64 = OR64rr %19, killed %105, implicit-def dead $eflags
+    JMP_1 %bb.5
+  
+  bb.2:
+    successors: %bb.1(0x04000000), %bb.2(0x7c000000)
+  
+    %28:gr64 = PHI %88, %bb.3, %48, %bb.2
+    %29:gr64 = PHI %89, %bb.3, %49, %bb.2
+    %30:gr64 = PHI %90, %bb.3, %50, %bb.2
+    %31:gr64 = PHI %91, %bb.3, %51, %bb.2
+    %32:gr64 = PHI %68, %bb.3, %56, %bb.2
+    %33:gr64 = PHI %69, %bb.3, %57, %bb.2
+    %34:gr64 = PHI %70, %bb.3, %58, %bb.2
+    %35:gr64 = PHI %71, %bb.3, %59, %bb.2
+    %36:gr64 = PHI %60, %bb.3, %52, %bb.2
+    %37:gr64 = PHI %61, %bb.3, %53, %bb.2
+    %38:gr64 = PHI %62, %bb.3, %54, %bb.2
+    %39:gr64 = PHI %63, %bb.3, %55, %bb.2
+    %40:gr64 = PHI %72, %bb.3, %44, %bb.2
+    %41:gr64 = PHI %73, %bb.3, %45, %bb.2
+    %42:gr64 = PHI %74, %bb.3, %46, %bb.2
+    %43:gr64 = PHI %75, %bb.3, %47, %bb.2
+    %55:gr64 = SHLD64rri8 %39, %38, 1, implicit-def dead $eflags
+    %54:gr64 = SHLD64rri8 %38, %37, 1, implicit-def dead $eflags
+    %53:gr64 = SHLD64rri8 %37, %36, 1, implicit-def dead $eflags
+    %52:gr64 = SHLD64rri8 %36, %43, 1, implicit-def dead $eflags
+    %93:gr64 = SHLD64rri8 %43, %42, 1, implicit-def dead $eflags
+    %94:gr64 = SHLD64rri8 %42, %41, 1, implicit-def dead $eflags
+    %95:gr64 = SHLD64rri8 %41, %40, 1, implicit-def dead $eflags
+    %96:gr64 = ADD64rr %40, %40, implicit-def dead $eflags
+    %44:gr64 = OR64rr %28, killed %96, implicit-def dead $eflags
+    %45:gr64 = OR64rr %29, killed %95, implicit-def dead $eflags
+    %46:gr64 = OR64rr %30, killed %94, implicit-def dead $eflags
+    %47:gr64 = OR64rr %31, killed %93, implicit-def dead $eflags
+    %97:gr64 = SUB64rr %64, %52, implicit-def $eflags
+    %98:gr64 = SBB64rr %65, %53, implicit-def $eflags, implicit $eflags
+    %99:gr64 = SBB64rr %66, %54, implicit-def $eflags, implicit $eflags
+    %100:gr64 = SBB64rr %67, %55, implicit-def dead $eflags, implicit $eflags
+    %48:gr64 = SHR64ri %100, 63, implicit-def dead $eflags
+    %49:gr64 = SUBREG_TO_REG 0, %92, %subreg.sub_32bit
+    %51:gr64 = COPY %49
+    %50:gr64 = COPY %49
+    %56:gr64 = ADD64ri32 %32, -1, implicit-def $eflags
+    %57:gr64 = ADC64ri32 %33, -1, implicit-def $eflags, implicit $eflags
+    %58:gr64 = ADC64ri32 %34, -1, implicit-def $eflags, implicit $eflags
+    %59:gr64 = ADC64ri32 %35, -1, implicit-def dead $eflags, implicit $eflags
+    %102:gr64 = OR64rr %57, %59, implicit-def dead $eflags
+    %103:gr64 = OR64rr %56, %58, implicit-def dead $eflags
+    %104:gr64 = OR64rr %103, killed %102, implicit-def $eflags
+    JCC_1 %bb.1, 4, implicit $eflags
+    JMP_1 %bb.2
+  
+  bb.3:
+    successors: %bb.2(0x80000000)
+  
+    %92:gr32 = MOV32r0 implicit-def dead $eflags
+    %62:gr64 = SUBREG_TO_REG 0, %92, %subreg.sub_32bit
+    %63:gr64 = COPY %62
+    %61:gr64 = COPY %62
+    %60:gr64 = COPY %62
+    %66:gr64 = MOV64ri32 -1
+    %67:gr64 = COPY %66
+    %65:gr64 = COPY %66
+    %64:gr64 = COPY %66
+    %91:gr64 = COPY %62
+    %90:gr64 = COPY %62
+    %89:gr64 = COPY %62
+    %88:gr64 = COPY %62
+    JMP_1 %bb.2
+  
+  bb.4:
+    successors: %bb.1(0x30000000), %bb.3(0x50000000)
+  
+    %68:gr64 = MOV32ri64 1
+    %86:gr32 = MOV32r0 implicit-def dead $eflags
+    %74:gr64 = SUBREG_TO_REG 0, %86, %subreg.sub_32bit
+    %71:gr64 = COPY %74
+    %70:gr64 = COPY %74
+    %69:gr64 = COPY %74
+    %75:gr64 = COPY %74
+    %73:gr64 = COPY %74
+    %72:gr64 = COPY %74
+    %85:gr64 = COPY %74
+    %84:gr64 = COPY %74
+    %83:gr64 = COPY %74
+    %82:gr64 = COPY %74
+    %87:gr8 = COPY %86.sub_8bit
+    TEST8rr %87, %87, implicit-def $eflags
+    JCC_1 %bb.1, 5, implicit $eflags
+    JMP_1 %bb.3
+  
+  bb.5:
+    %76:gr64 = PHI %12, %bb.0, %24, %bb.1
+    %77:gr64 = PHI %13, %bb.0, %25, %bb.1
+    %78:gr64 = PHI %14, %bb.0, %26, %bb.1
+    %79:gr64 = PHI %15, %bb.0, %27, %bb.1
+    MOV64mr $noreg, 1, $noreg, 0, $noreg, %76 :: (store (s64) into `ptr null`, align 16)
+    MOV64mr $noreg, 1, $noreg, 8, $noreg, %77 :: (store (s64) into `ptr null` + 8, basealign 16)
+    MOV64mr $noreg, 1, $noreg, 16, $noreg, %78 :: (store (s64) into `ptr null` + 16, align 16)
+    MOV64mr $noreg, 1, $noreg, 24, $noreg, %79 :: (store (s64) into `ptr null` + 24, basealign 16)
+    %109:gr32 = MOV32r0 implicit-def dead $eflags
+    $eax = COPY %109
+    RET 0, $eax
+...
diff --git a/llvm/test/CodeGen/X86/pr57673.ll b/llvm/test/CodeGen/X86/pr57673.ll
index c3710a7fc462c..0aecbd785d54a 100644
--- a/llvm/test/CodeGen/X86/pr57673.ll
+++ b/llvm/test/CodeGen/X86/pr57673.ll
@@ -20,16 +20,17 @@ define void @foo() {
   ; NORMAL: bb.0.bb_entry:
   ; NORMAL-NEXT:   successors: %bb.1(0x80000000)
   ; NORMAL-NEXT: {{  $}}
-  ; NORMAL-NEXT:   [[MOV32r0_:%[0-9]+]]:gr8 = IMPLICIT_DEF
-  ; NORMAL-NEXT:   [[COPY:%[0-9]+]]:gr8 = IMPLICIT_DEF
- ; NORMAL-NEXT:    [[MOV32r0_1:%[0-9]+]]:gr32 = MOV32r0 implicit-def dead $eflags
+  ; NORMAL-NEXT:   [[DEF:%[0-9]+]]:gr8 = IMPLICIT_DEF
+  ; NORMAL-NEXT:   [[DEF1:%[0-9]+]]:gr8 = IMPLICIT_DEF
+  ; NORMAL-NEXT:   [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def dead $eflags
+  ; NORMAL-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[MOV32r0_]], %subreg.sub_32bit
   ; NORMAL-NEXT:   [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.1.i, 1, $noreg, 0, $noreg
-  ; NORMAL-NEXT:   [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF
+  ; NORMAL-NEXT:   [[DEF2:%[0-9]+]]:gr64 = IMPLICIT_DEF
   ; NORMAL-NEXT: {{  $}}
   ; NORMAL-NEXT: bb.1.bb_8:
   ; NORMAL-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
   ; NORMAL-NEXT: {{  $}}
-  ; NORMAL-NEXT:   TEST8rr [[MOV32r0_]], [[COPY]], implicit-def $eflags
+  ; NORMAL-NEXT:   TEST8rr [[DEF]], [[DEF1]], implicit-def $eflags
   ; NORMAL-NEXT:   JCC_1 %bb.3, 5, implicit $eflags
   ; NORMAL-NEXT:   JMP_1 %bb.2
   ; NORMAL-NEXT: {{  $}}
@@ -38,7 +39,7 @@ define void @foo() {
   ; NORMAL-NEXT: {{  $}}
   ; NORMAL-NEXT:   [[MOVUPSrm:%[0-9]+]]:vr128 = MOVUPSrm %stack.1.i, 1, $noreg, 40, $noreg :: (load (s128) from %ir.i4, align 8)
   ; NORMAL-NEXT:   MOVUPSmr $noreg, 1, $noreg, 0, $noreg, killed [[MOVUPSrm]] :: (store (s128) into `ptr null`, align 8)
-  ; NORMAL-NEXT:   DBG_VALUE_LIST !3, !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_plus_uconst, 40, DW_OP_stack_value), %stack.1.i, %stack.1.i, debug-location !8
+  ; NORMAL-NEXT:   DBG_VALUE_LIST !3, !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_plus_uconst, 40, DW_OP_stack_value), %stack.1.i, %stack.1.i,  debug-location !8
   ; NORMAL-NEXT:   [[MOVUPSrm1:%[0-9]+]]:vr128 = MOVUPSrm %stack.1.i, 1, $noreg, 40, $noreg :: (load (s128) from %ir.i6, align 8)
   ; NORMAL-NEXT:   MOVUPSmr $noreg, 1, $noreg, 0, $noreg, killed [[MOVUPSrm1]] :: (store (s128) into `ptr null`, align 8)
   ; NORMAL-NEXT: {{  $}}
@@ -46,13 +47,12 @@ define void @foo() {
   ; NORMAL-NEXT:   successors: %bb.1(0x80000000)
   ; NORMAL-NEXT: {{  $}}
   ; NORMAL-NEXT:   ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
-  ; NORMAL-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[MOV32r0_1]], %subreg.sub_32bit
   ; NORMAL-NEXT:   $rdi = COPY [[SUBREG_TO_REG]]
   ; NORMAL-NEXT:   $rsi = COPY [[SUBREG_TO_REG]]
   ; NORMAL-NEXT:   $rdx = COPY [[SUBREG_TO_REG]]
-  ; NORMAL-NEXT:   $ecx = COPY [[MOV32r0_1]]
+  ; NORMAL-NEXT:   $ecx = COPY [[MOV32r0_]]
   ; NORMAL-NEXT:   $r8 = COPY [[LEA64r]]
-  ; NORMAL-NEXT:   CALL64r [[DEF]], csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit $ecx, implicit $r8, implicit-def $rsp, implicit-def $ssp
+  ; NORMAL-NEXT:   CALL64r [[DEF2]], csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit $ecx, implicit $r8, implicit-def $rsp, implicit-def $ssp
   ; NORMAL-NEXT:   ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
   ; NORMAL-NEXT:   JMP_1 %bb.1
   ;
@@ -60,16 +60,17 @@ define void @foo() {
   ; INSTRREF: bb.0.bb_entry:
   ; INSTRREF-NEXT:   successors: %bb.1(0x80000000)
   ; INSTRREF-NEXT: {{  $}}
-  ; INSTRREF-NEXT:   [[MOV32r0_:%[0-9]+]]:gr8 = IMPLICIT_DEF
-  ; INSTRREF-NEXT:   [[COPY:%[0-9]+]]:gr8 = IMPLICIT_DEF
-  ; INSTRREF-NEXT:   [[MOV32r0_1:%[0-9]+]]:gr32 = MOV32r0 implicit-def dead $eflags
+  ; INSTRREF-NEXT:   [[DEF:%[0-9]+]]:gr8 = IMPLICIT_DEF
+  ; INSTRREF-NEXT:   [[DEF1:%[0-9]+]]:gr8 = IMPLICIT_DEF
+  ; INSTRREF-NEXT:   [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def dead $eflags
+  ; INSTRREF-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[MOV32r0_]], %subreg.sub_32bit
   ; INSTRREF-NEXT:   [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.1.i, 1, $noreg, 0, $noreg
-  ; INSTRREF-NEXT:   [[DEF:%[0-9]+]]:gr64 = IMPLICIT_DEF
+  ; INSTRREF-NEXT:   [[DEF2:%[0-9]+]]:gr64 = IMPLICIT_DEF
   ; INSTRREF-NEXT: {{  $}}
   ; INSTRREF-NEXT: bb.1.bb_8:
   ; INSTRREF-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
   ; INSTRREF-NEXT: {{  $}}
-  ; INSTRREF-NEXT:   TEST8rr [[MOV32r0_]], [[COPY]], implicit-def $eflags
+  ; INSTRREF-NEXT:   TEST8rr [[DEF]], [[DEF1]], implicit-def $eflags
   ; INSTRREF-NEXT:   JCC_1 %bb.3, 5, implicit $eflags
   ; INSTRREF-NEXT:   JMP_1 %bb.2
   ; INSTRREF-NEXT: {{  $}}
@@ -78,7 +79,7 @@ define void @foo() {
   ; INSTRREF-NEXT: {{  $}}
   ; INSTRREF-NEXT:   [[MOVUPSrm:%[0-9]+]]:vr128 = MOVUPSrm %stack.1.i, 1, $noreg, 40, $noreg :: (load (s128) from %ir.i4, align 8)
   ; INSTRREF-NEXT:   MOVUPSmr $noreg, 1, $noreg, 0, $noreg, killed [[MOVUPSrm]] :: (store (s128) into `ptr null`, align 8)
-  ; INSTRREF-NEXT:   DBG_VALUE_LIST !3, !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_plus_uconst, 40, DW_OP_stack_value), %stack.1.i, %stack.1.i, debug-location !8
+  ; INSTRREF-NEXT:   DBG_VALUE_LIST !3, !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_plus_uconst, 40, DW_OP_stack_value), %stack.1.i, %stack.1.i,  debug-location !8
   ; INSTRREF-NEXT:   [[MOVUPSrm1:%[0-9]+]]:vr128 = MOVUPSrm %stack.1.i, 1, $noreg, 40, $noreg :: (load (s128) from %ir.i6, align 8)
   ; INSTRREF-NEXT:   MOVUPSmr $noreg, 1, $noreg, 0, $noreg, killed [[MOVUPSrm1]] :: (store (s128) into `ptr null`, align 8)
   ; INSTRREF-NEXT: {{  $}}
@@ -86,13 +87,12 @@ define void @foo() {
   ; INSTRREF-NEXT:   successors: %bb.1(0x80000000)
   ; INSTRREF-NEXT: {{  $}}
   ; INSTRREF-NEXT:   ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
-  ; INSTRREF-NEXT:   [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[MOV32r0_1]], %subreg.sub_32bit
   ; INSTRREF-NEXT:   $rdi = COPY [[SUBREG_TO_REG]]
   ; INSTRREF-NEXT:   $rsi = COPY [[SUBREG_TO_REG]]
   ; INSTRREF-NEXT:   $rdx = COPY [[SUBREG_TO_REG]]
-  ; INSTRREF-NEXT:   $ecx = COPY [[MOV32r0_1]]
+  ; INSTRREF-NEXT:   $ecx = COPY [[MOV32r0_]]
   ; INSTRREF-NEXT:   $r8 = COPY [[LEA64r]]
-  ; INSTRREF-NEXT:   CALL64r [[DEF]], csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit $ecx, implicit $r8, implicit-def $rsp, implicit-def $ssp
+  ; INSTRREF-NEXT:   CALL64r [[DEF2]], csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit $ecx, implicit $r8, implicit-def $rsp, implicit-def $ssp
   ; INSTRREF-NEXT:   ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
   ; INSTRREF-NEXT:   JMP_1 %bb.1
 bb_entry:
diff --git a/llvm/test/CodeGen/X86/reverse_branches.ll b/llvm/test/CodeGen/X86/reverse_branches.ll
index 93c82a4524ef9..b0b4b543c9f19 100644
--- a/llvm/test/CodeGen/X86/reverse_branches.ll
+++ b/llvm/test/CodeGen/X86/reverse_branches.ll
@@ -72,22 +72,22 @@ define i32 @test_branches_order() uwtable ssp {
 ; CHECK-NEXT:  LBB0_7: ## %for.end11
 ; CHECK-NEXT:    leaq L_.str2(%rip), %rdi
 ; CHECK-NEXT:    callq _puts
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    movq %rsp, %rcx
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    jmp LBB0_8
 ; CHECK-NEXT:    .p2align 4
 ; CHECK-NEXT:  LBB0_15: ## %for.inc38
 ; CHECK-NEXT:    ## in Loop: Header=BB0_8 Depth=1
-; CHECK-NEXT:    incl %eax
+; CHECK-NEXT:    incl %ecx
 ; CHECK-NEXT:  LBB0_8: ## %for.cond14
 ; CHECK-NEXT:    ## =>This Loop Header: Depth=1
 ; CHECK-NEXT:    ## Child Loop BB0_10 Depth 2
 ; CHECK-NEXT:    ## Child Loop BB0_12 Depth 3
-; CHECK-NEXT:    cmpl $999, %eax ## imm = 0x3E7
+; CHECK-NEXT:    cmpl $999, %ecx ## imm = 0x3E7
 ; CHECK-NEXT:    jg LBB0_16
 ; CHECK-NEXT:  ## %bb.9: ## %for.cond18.preheader
 ; CHECK-NEXT:    ## in Loop: Header=BB0_8 Depth=1
-; CHECK-NEXT:    movq %rcx, %rdx
+; CHECK-NEXT:    movq %rax, %rdx
 ; CHECK-NEXT:    xorl %esi, %esi
 ; CHECK-NEXT:    xorl %edi, %edi
 ; CHECK-NEXT:    jmp LBB0_10


