[llvm] MachineTraceMetrics: always include instr latency in depth (PR #73550)

Ramkumar Ramachandra via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 27 09:56:45 PST 2023


https://github.com/artagnon created https://github.com/llvm/llvm-project/pull/73550

Fix the depth calculation in MachineTraceMetrics to include the latency of the current instruction when all dependencies are transient, or when there are no dependencies. This is the right thing to do, as demonstrated by changes to the machine-trace-metrics-depth-height test: it no longer reports spurious depths of 0. This simple change results in better CodeGen on the X86, AArch64, PowerPC, AMDGPU, and RISC-V targets.
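
The gist of the change, as a condensed sketch of the updateDepth hunk in the diff below (names abbreviated; see the patch for full context):

  // MachineTraceMetrics::Ensemble::updateDepth: Cycle is the max over the
  // operand latencies of UseMI's data dependencies. Previously it stayed at
  // 0 when UseMI had no dependencies, or when all of them were transient.
  unsigned Cycle = 0;
  for (const DataDep &Dep : Deps) {
    // ... DepCycle = depth of Dep.DefMI, plus the operand latency when the
    // dependency is not transient ...
    Cycle = std::max(Cycle, DepCycle);
  }
  // The fix: fall back to the instruction's own latency in that case.
  if (!Cycle)
    Cycle = MTM.SchedModel.computeInstrLatency(&UseMI);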

-- 8< --
Based on #73324. Please review only the second patch.

From 7da7fa262383efa7171d85de8301d1954e4b67b6 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <Ramkumar.Ramachandra@imgtec.com>
Date: Fri, 24 Nov 2023 12:39:39 +0000
Subject: [PATCH 1/2] MachineTraceMetrics: add test for depth, height
 calculation

MTM's depth and height calculation is untested and wrong in several
places that involve PHIs or load-stores. Add a test under the RISC-V
target to document the current behavior. A follow-up could fix the
calculation in MTM.
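
For reference, the debug output checked by this test can be reproduced with
an assertions-enabled build by running the test's RUN line by hand,
substituting the test file for %s:

  llc -mtriple=riscv64 -mattr=+m,+v -debug-only=machine-trace-metrics \
    llvm/test/CodeGen/RISCV/machine-trace-metrics-depth-height.ll \
    -o /dev/null 2>&1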
---
 .../machine-trace-metrics-depth-height.ll     | 177 ++++++++++++++++++
 1 file changed, 177 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/machine-trace-metrics-depth-height.ll

diff --git a/llvm/test/CodeGen/RISCV/machine-trace-metrics-depth-height.ll b/llvm/test/CodeGen/RISCV/machine-trace-metrics-depth-height.ll
new file mode 100644
index 000000000000000..da6c15a5845d55a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/machine-trace-metrics-depth-height.ll
@@ -0,0 +1,177 @@
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v -debug-only=machine-trace-metrics %s -o /dev/null 2>&1 | FileCheck %s
+
+@heap_ptr = global ptr null, align 8
+@heap_end = global ptr null, align 8
+@heap_requested = global i64 0, align 8
+
+define ptr @mtm_depth_height(ptr %ptr, i64 %size) {
+; CHECK:      Computing MinInstr trace through [[BB1:%bb.[0-9]+]]
+; CHECK-NEXT:   pred for [[BB0:%bb.[0-9]+]]: null
+; CHECK-NEXT:   pred for [[BB1]]: [[BB0]]
+; CHECK-NEXT:   succ for [[BB10:%bb.[0-9]+]]: null
+; CHECK-NEXT:   succ for [[BB14:%bb.[0-9]+]]: [[BB10]]
+; CHECK-NEXT:   succ for [[BB15:%bb.[0-9]+]]: [[BB10]]
+; CHECK-NEXT:   succ for [[BB9:%bb.[0-9]+]]: null
+; CHECK-NEXT:   succ for [[BB8:%bb.[0-9]+]]: [[BB9]]
+; CHECK-NEXT:   succ for [[BB16:%bb.[0-9]+]]: [[BB8]]
+; CHECK-NEXT:   succ for [[BB17:%bb.[0-9]+]]: [[BB8]]
+; CHECK-NEXT:   succ for [[BB6:%bb.[0-9]+]]: null
+; CHECK-NEXT:   succ for [[BB5:%bb.[0-9]+]]: [[BB6]]
+; CHECK-NEXT:   succ for [[BB4:%bb.[0-9]+]]: [[BB5]]
+; CHECK-NEXT:   succ for [[BB12:%bb.[0-9]+]]: [[BB16]]
+; CHECK-NEXT:   succ for [[BB11:%bb.[0-9]+]]: [[BB12]]
+; CHECK-NEXT:   succ for [[BB3:%bb.[0-9]+]]: [[BB11]]
+; CHECK-NEXT:   succ for [[BB2:%bb.[0-9]+]]: [[BB15]]
+; CHECK-NEXT:   succ for [[BB1]]: [[BB14]]
+; CHECK-EMPTY:
+; CHECK-NEXT: Depths for [[BB0]]:
+; CHECK-NEXT:       0 Instructions
+; CHECK-NEXT: 0	[[R22:%[0-9]+]]:gpr = COPY [[X11:\$x[0-9]+]]
+; CHECK-NEXT: 0	[[R21:%[0-9]+]]:gpr = COPY [[X10:\$x[0-9]+]]
+; CHECK-NEXT: 0	[[R24:%[0-9]+]]:gpr = SLTIU [[R21]]:gpr, 1
+; CHECK-NEXT: 0	[[R25:%[0-9]+]]:gpr = SLTIU [[R22]]:gpr, 1
+; CHECK-NEXT: 1	[[R26:%[0-9]+]]:gpr = OR killed [[R24]]:gpr
+; CHECK-NEXT: 2	BEQ killed [[R26]]:gpr, [[X0:\$x[0-9]+]], [[BB1]]
+; CHECK-EMPTY:
+; CHECK-NEXT: Depths for [[BB1]]:
+; CHECK-NEXT:       4 Instructions
+; CHECK-NEXT: 0	[[R29:%[0-9]+]]:gpr = LUI target-flags(riscv-hi) @heap_ptr
+; CHECK-NEXT: 1	[[R0:%[0-9]+]]:gpr = LD [[R29]]:gpr, target-flags(riscv-lo) @heap_ptr 
+; CHECK-NEXT: 5	[[R30:%[0-9]+]]:gpr = ADD [[R0]]:gpr, [[R22]]:gpr
+; CHECK-NEXT: 0	[[R31:%[0-9]+]]:gpr = LUI target-flags(riscv-hi) @heap_requested
+; CHECK-NEXT: 1	[[R32:%[0-9]+]]:gpr = LD [[R31]]:gpr, target-flags(riscv-lo) @heap_requested 
+; CHECK-NEXT: 5	[[R33:%[0-9]+]]:gpr = ADD killed [[R32]]:gpr, [[R22]]:gpr
+; CHECK-NEXT: 6	[[R34:%[0-9]+]]:gpr = ANDI [[R30]]:gpr, 7
+; CHECK-NEXT: 7	[[R35:%[0-9]+]]:gpr = SLTIU [[R34]]:gpr, 1
+; CHECK-NEXT: 0	[[R36:%[0-9]+]]:gpr = ADDI [[X0]], 8
+; CHECK-NEXT: 7	[[R37:%[0-9]+]]:gpr = nuw nsw SUB killed [[R36]]:gpr, [[R34]]:gpr
+; CHECK-NEXT: 8	[[R38:%[0-9]+]]:gpr = ADDI killed [[R35]]:gpr, -1
+; CHECK-NEXT: 9	[[R39:%[0-9]+]]:gpr = AND killed [[R38]]:gpr, killed [[R37]]:gpr
+; CHECK-NEXT: 10	[[R40:%[0-9]+]]:gpr = ADD killed [[R33]]:gpr, [[R39]]:gpr
+; CHECK-NEXT: 10	[[R1:%[0-9]+]]:gpr = ADD [[R30]]:gpr, [[R39:%[0-9]+]]:gpr
+; CHECK-NEXT: 11	SD killed [[R40]]:gpr, [[R31]]:gpr, target-flags(riscv-lo) @heap_requested 
+; CHECK-NEXT: 0	[[R41:%[0-9]+]]:gpr = LUI target-flags(riscv-hi) @heap_end
+; CHECK-NEXT: 1	[[R42:%[0-9]+]]:gpr = LD killed [[R41]]:gpr, target-flags(riscv-lo) @heap_end 
+; CHECK-NEXT: 11	BGEU killed [[R42]]:gpr, [[R1]]:gpr, [[BB2]]
+; CHECK-NEXT: Heights for [[BB10]]:
+; CHECK-NEXT:       1 Instructions
+; CHECK-NEXT: 0	PseudoRET implicit [[X10]]
+; CHECK-NEXT: 0	[[X10]] = COPY [[R20:%[0-9]+]]:gpr
+; CHECK-NEXT: 0	[[R20]]:gpr = PHI [[R23:%[0-9]+]]:gpr, [[BB13:%bb.[0-9]+]], [[R28:%[0-9]+]]:gpr, [[BB14]], [[R44:%[0-9]+]]:gpr, [[BB15]], [[R0]]:gpr, [[BB7:%bb.[0-9]+]], [[R0]]:gpr, [[BB9]]
+; CHECK-NEXT: [[BB10]] Live-ins:
+; CHECK-NEXT: Heights for [[BB14]]:
+; CHECK-NEXT:       2 Instructions
+; CHECK-NEXT: pred	0	[[R20]]:gpr = PHI [[R23]]:gpr, [[BB13]], [[R28]]:gpr, [[BB14]], [[R44]]:gpr, [[BB15]], [[R0]]:gpr, [[BB7]], [[R0]]:gpr, [[BB9]]
+; CHECK-NEXT: 0	PseudoBR [[BB10]]
+; CHECK-NEXT: 0	[[R28]]:gpr = COPY [[R43:%[0-9]+]]:gpr
+; CHECK-NEXT: 0	[[R43]]:gpr = COPY [[X0]]
+; CHECK-NEXT: [[BB14]] Live-ins: X0@0
+; CHECK-NEXT: Heights for [[BB1]]:
+; CHECK-NEXT:      20 Instructions
+; CHECK-NEXT: 11	0	BGEU killed [[R42]]:gpr, [[R1]]:gpr, [[BB2]]
+; CHECK-NEXT: 11	4	[[R42]]:gpr = LD killed [[R41]]:gpr, target-flags(riscv-lo) @heap_end 
+; CHECK-NEXT: 11	5	[[R41]]:gpr = LUI target-flags(riscv-hi) @heap_end
+; CHECK-NEXT: 11	0	SD killed [[R40]]:gpr, [[R31]]:gpr, target-flags(riscv-lo) @heap_requested 
+; CHECK-NEXT: 11	1	[[R1]]:gpr = ADD [[R30]]:gpr, [[R39]]:gpr
+; CHECK-NEXT: 11	1	[[R40]]:gpr = ADD killed [[R33]]:gpr, [[R39]]:gpr
+; CHECK-NEXT: 11	2	[[R39]]:gpr = AND killed [[R38]]:gpr, killed [[R37]]:gpr
+; CHECK-NEXT: 11	3	[[R38]]:gpr = ADDI killed [[R35]]:gpr, -1
+; CHECK-NEXT: 11	3	[[R37]]:gpr = nuw nsw SUB killed [[R36]]:gpr, [[R34]]:gpr
+; CHECK-NEXT: 11	4	[[R36]]:gpr = ADDI [[X0]], 8
+; CHECK-NEXT: 11	4	[[R35]]:gpr = SLTIU [[R34]]:gpr, 1
+; CHECK-NEXT: 11	5	[[R34]]:gpr = ANDI [[R30]]:gpr, 7
+; CHECK-NEXT: 11	2	[[R33]]:gpr = ADD killed [[R32]]:gpr, [[R22]]:gpr
+; CHECK-NEXT: 11	6	[[R32]]:gpr = LD [[R31]]:gpr, target-flags(riscv-lo) @heap_requested 
+; CHECK-NEXT: 11	7	[[R31]]:gpr = LUI target-flags(riscv-hi) @heap_requested
+; CHECK-NEXT: 11	6	[[R30]]:gpr = ADD [[R0]]:gpr, [[R22]]:gpr
+; CHECK-NEXT: 11	10	[[R0]]:gpr = LD [[R29]]:gpr, target-flags(riscv-lo) @heap_ptr 
+; CHECK-NEXT: 11	11	[[R29]]:gpr = LUI target-flags(riscv-hi) @heap_ptr
+; CHECK-NEXT: [[BB1]] Live-ins: [[R22]]@6 X0@4
+; CHECK-NEXT: Critical path: 11
+entry:
+  %ptrint = ptrtoint ptr %ptr to i64
+  %cmp = icmp eq ptr %ptr, null
+  %cmp.i = icmp eq i64 %size, 0
+  %or.cond = or i1 %cmp, %cmp.i
+  br i1 %or.cond, label %return, label %if.end
+
+if.end:                                          ; preds = %entry
+  %0 = load ptr, ptr @heap_ptr, align 8
+  %1 = ptrtoint ptr %0 to i64
+  %add.ptr = getelementptr inbounds i8, ptr %0, i64 %size
+  %2 = load i64, ptr @heap_requested, align 8
+  %add = add i64 %2, %size
+  %3 = ptrtoint ptr %add.ptr to i64
+  %rem = and i64 %3, 7
+  %cmp.eq = icmp eq i64 %rem, 0
+  %sub = sub nuw nsw i64 8, %rem
+  %sel = select i1 %cmp.eq, i64 0, i64 %sub
+  %storemerge = add i64 %add, %sel
+  %next_heap_ptr = getelementptr inbounds i8, ptr %add.ptr, i64 %sel
+  store i64 %storemerge, ptr @heap_requested, align 8
+  %4 = load ptr, ptr @heap_end, align 8
+  %cmp.ugt = icmp ugt ptr %next_heap_ptr, %4
+  br i1 %cmp.ugt, label %return, label %exit
+
+exit:                                            ; preds = %if.end
+  store ptr %next_heap_ptr, ptr @heap_ptr, align 8
+  %cmp.not = icmp eq ptr %0, null
+  br i1 %cmp.not, label %return, label %ph
+
+ph:                                              ; preds = %exit
+  %5 = tail call i64 @llvm.vscale.i64()
+  %6 = shl nuw nsw i64 %5, 4
+  %7 = tail call i64 @llvm.umax.i64(i64 %6, i64 32)
+  %min.iters.check = icmp ugt i64 %7, %size
+  br i1 %min.iters.check, label %for.ph, label %vector.memcheck
+
+vector.memcheck:                                  ; preds = %ph
+  %8 = tail call i64 @llvm.vscale.i64()
+  %9 = shl nuw nsw i64 %8, 4
+  %10 = sub i64 %1, %ptrint
+  %diff.check = icmp ult i64 %10, %9
+  br i1 %diff.check, label %for.ph, label %vector.ph
+
+vector.ph:                                        ; preds = %vector.memcheck
+  %11 = tail call i64 @llvm.vscale.i64()
+  %.neg = mul nsw i64 %11, -16
+  %n.vec = and i64 %.neg, %size
+  %12 = tail call i64 @llvm.vscale.i64()
+  %13 = shl nuw nsw i64 %12, 4
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %14 = getelementptr inbounds i8, ptr %ptr, i64 %index
+  %wide.load = load <vscale x 16 x i8>, ptr %14, align 1
+  %15 = getelementptr inbounds i8, ptr %0, i64 %index
+  store <vscale x 16 x i8> %wide.load, ptr %15, align 1
+  %index.next = add nuw i64 %index, %13
+  %16 = icmp eq i64 %index.next, %n.vec
+  br i1 %16, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.vec, %size
+  br i1 %cmp.n, label %return, label %for.ph
+
+for.ph:                                           ; preds = %vector.memcheck, %ph, %middle.block
+  %i.ph = phi i64 [ 0, %vector.memcheck ], [ 0, %ph ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.body:                                         ; preds = %for.ph, %for.body
+  %i = phi i64 [ %inc, %for.body ], [ %i.ph, %for.ph ]
+  %arrayidx = getelementptr inbounds i8, ptr %ptr, i64 %i
+  %17 = load i8, ptr %arrayidx, align 1
+  %arrayidx.st = getelementptr inbounds i8, ptr %0, i64 %i
+  store i8 %17, ptr %arrayidx.st, align 1
+  %inc = add nuw i64 %i, 1
+  %exitcond.not = icmp eq i64 %inc, %size
+  br i1 %exitcond.not, label %return, label %for.body
+
+return:                                           ; preds = %for.body, %middle.block, %if.end, %exit, %entry
+  %retval = phi ptr [ null, %entry ], [ null, %exit ], [ null, %if.end ], [ %0, %middle.block ], [ %0, %for.body ]
+  ret ptr %retval
+}
+
+declare i64 @llvm.vscale.i64()
+declare i64 @llvm.umax.i64(i64, i64)

From c6deea1db3ed3089410b031d00bdc9987f244853 Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <Ramkumar.Ramachandra@imgtec.com>
Date: Mon, 27 Nov 2023 11:38:18 +0000
Subject: [PATCH 2/2] MachineTraceMetrics: always include instr latency in
 depth

Fix the depth calculation in MachineTraceMetrics to include the latency
of the current instruction when all dependencies are transient, or when
there are no dependencies. This is the right thing to do, as
demonstrated by changes to the machine-trace-metrics-depth-height test:
it no longer reports spurious depths of 0. This simple change results in
better CodeGen on the X86, AArch64, PowerPC, AMDGPU, and RISC-V targets.
---
 llvm/lib/CodeGen/MachineTraceMetrics.cpp      |  14 +-
 .../early-ifcvt-likely-predictable.mir        |  32 +-
 .../CodeGen/AArch64/logical_shifted_reg.ll    |  15 +-
 llvm/test/CodeGen/AArch64/machine_cse.ll      |  23 +-
 llvm/test/CodeGen/AArch64/neon-dotreduce.ll   | 256 ++++-----
 llvm/test/CodeGen/AArch64/tbl-loops.ll        | 192 +++----
 llvm/test/CodeGen/AMDGPU/early-if-convert.ll  | 506 ++++++++++++----
 llvm/test/CodeGen/AMDGPU/wave32.ll            | 543 ++++++------------
 llvm/test/CodeGen/PowerPC/machine-combiner.ll | 345 ++++++++---
 .../machine-trace-metrics-depth-height.ll     |  82 +--
 .../rvv/fixed-vectors-int-explodevector.ll    |  24 +-
 .../rvv/fixed-vectors-strided-load-combine.ll |   2 +-
 llvm/test/CodeGen/X86/alias-static-alloca.ll  |   6 +-
 llvm/test/CodeGen/X86/avx-vinsertf128.ll      |  12 +-
 llvm/test/CodeGen/X86/avx512-regcall-Mask.ll  |   8 +-
 llvm/test/CodeGen/X86/avx512vnni-combine.ll   |   9 +-
 llvm/test/CodeGen/X86/avx512vnni.ll           |  18 +-
 llvm/test/CodeGen/X86/avx_vnni-intrinsics.ll  |   6 +-
 llvm/test/CodeGen/X86/avxvnni-combine.ll      |  36 +-
 llvm/test/CodeGen/X86/avxvnni.ll              |  72 ++-
 llvm/test/CodeGen/X86/early-ifcvt-remarks.ll  |  21 +-
 .../X86/gather-scatter-opaque-ptr-2.ll        |   2 +-
 .../CodeGen/X86/gather-scatter-opaque-ptr.ll  |   4 +-
 llvm/test/CodeGen/X86/hipe-cc.ll              |   2 +-
 llvm/test/CodeGen/X86/hipe-cc64.ll            |   6 +-
 llvm/test/CodeGen/X86/imul.ll                 |   7 +-
 llvm/test/CodeGen/X86/lea-opt2.ll             |   8 +-
 .../test/CodeGen/X86/machine-combiner-dbg.mir |  23 +-
 .../machine-trace-metrics-entryBB-critpath.ll |   6 +-
 .../test/CodeGen/X86/masked_gather_scatter.ll |   8 +-
 llvm/test/CodeGen/X86/mul-constant-i64.ll     |   7 +-
 llvm/test/CodeGen/X86/no-split-size.ll        |  18 +-
 .../X86/stack-folding-int-avx512vnni.ll       |   6 +-
 .../CodeGen/X86/stack-folding-int-avxvnni.ll  |  12 +-
 llvm/test/CodeGen/X86/umul-with-overflow.ll   | 217 +++----
 llvm/test/CodeGen/X86/vector-fshr-128.ll      |   4 +-
 .../MC/AArch64/local-bounds-single-trap.ll    |  62 +-
 37 files changed, 1470 insertions(+), 1144 deletions(-)

diff --git a/llvm/lib/CodeGen/MachineTraceMetrics.cpp b/llvm/lib/CodeGen/MachineTraceMetrics.cpp
index 3e6f36fe936fff0..34a549494de0906 100644
--- a/llvm/lib/CodeGen/MachineTraceMetrics.cpp
+++ b/llvm/lib/CodeGen/MachineTraceMetrics.cpp
@@ -817,6 +817,11 @@ updateDepth(MachineTraceMetrics::TraceBlockInfo &TBI, const MachineInstr &UseMI,
         .computeOperandLatency(Dep.DefMI, Dep.DefOp, &UseMI, Dep.UseOp);
     Cycle = std::max(Cycle, DepCycle);
   }
+  // If there are no dependencies, or all of them are transient, fall back
+  // to the latency of the instruction itself.
+  if (!Cycle)
+    Cycle = MTM.SchedModel.computeInstrLatency(&UseMI);
+
   // Remember the instruction depth.
   InstrCycles &MICycles = Cycles[&UseMI];
   MICycles.Depth = Cycle;
@@ -926,12 +931,11 @@ static unsigned updatePhysDepsUpwards(const MachineInstr &MI, unsigned Height,
       if (I == RegUnits.end())
         continue;
       unsigned DepHeight = I->Cycle;
-      if (!MI.isTransient()) {
+      if (!MI.isTransient())
         // We may not know the UseMI of this dependency, if it came from the
         // live-in list. SchedModel can handle a NULL UseMI.
         DepHeight += SchedModel.computeOperandLatency(&MI, MO.getOperandNo(),
                                                       I->MI, I->Op);
-      }
       Height = std::max(Height, DepHeight);
       // This regunit is dead above MI.
       RegUnits.erase(I);
@@ -1111,10 +1115,8 @@ computeInstrHeights(const MachineBasicBlock *MBB) {
       // Don't process PHI deps. They depend on the specific predecessor, and
       // we'll get them when visiting the predecessor.
       Deps.clear();
-      bool HasPhysRegs = !MI.isPHI() && getDataDeps(MI, Deps, MTM.MRI);
-
-      // There may also be regunit dependencies to include in the height.
-      if (HasPhysRegs)
+      if (!MI.isPHI() && getDataDeps(MI, Deps, MTM.MRI))
+        // There may also be regunit dependencies to include in the height.
         Cycle = updatePhysDepsUpwards(MI, Cycle, RegUnits, MTM.SchedModel,
                                       MTM.TII, MTM.TRI);
 
diff --git a/llvm/test/CodeGen/AArch64/early-ifcvt-likely-predictable.mir b/llvm/test/CodeGen/AArch64/early-ifcvt-likely-predictable.mir
index 425a23214871d68..1c160232849a35a 100644
--- a/llvm/test/CodeGen/AArch64/early-ifcvt-likely-predictable.mir
+++ b/llvm/test/CodeGen/AArch64/early-ifcvt-likely-predictable.mir
@@ -180,28 +180,36 @@ body:             |
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr64 = COPY $x0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1:
-  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT:   successors: %bb.3(0x30000000), %bb.2(0x50000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr64sp = PHI [[COPY2]], %bb.0, %4, %bb.1
-  ; CHECK-NEXT:   [[LDRBBui:%[0-9]+]]:gpr32common = LDRBBui [[PHI]], 0 :: (load (s8))
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:gpr64sp = PHI [[COPY2]], %bb.0, %4, %bb.3
+  ; CHECK-NEXT:   [[LDRBBui:%[0-9]+]]:gpr32 = LDRBBui [[PHI]], 0 :: (load (s8))
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr32all = COPY $wzr
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr32 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gpr32all = COPY [[COPY3]]
+  ; CHECK-NEXT:   CBZW killed [[LDRBBui]], %bb.3
+  ; CHECK-NEXT:   B %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2:
+  ; CHECK-NEXT:   successors: %bb.3(0x80000000)
+  ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[SUBSWri:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 4080, 12, implicit-def $nzcv
   ; CHECK-NEXT:   [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16711680
   ; CHECK-NEXT:   [[CSELWr:%[0-9]+]]:gpr32common = CSELWr [[COPY]], killed [[MOVi32imm]], 11, implicit $nzcv
   ; CHECK-NEXT:   [[SUBSWri1:%[0-9]+]]:gpr32 = SUBSWri [[CSELWr]], 0, 0, implicit-def $nzcv
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:gpr32 = COPY $wzr
   ; CHECK-NEXT:   [[CSELWr1:%[0-9]+]]:gpr32 = CSELWr [[CSELWr]], [[COPY5]], 12, implicit $nzcv
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:gpr32 = COPY [[CSELWr1]]
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:gpr32all = COPY [[CSELWr1]]
   ; CHECK-NEXT:   [[SUBSWri2:%[0-9]+]]:gpr32 = SUBSWri [[COPY]], 0, 0, implicit-def $nzcv
   ; CHECK-NEXT:   [[CSELWr2:%[0-9]+]]:gpr32 = CSELWr [[COPY]], [[COPY5]], 12, implicit $nzcv
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:gpr32 = COPY [[CSELWr2]]
-  ; CHECK-NEXT:   $wzr = SUBSWri [[LDRBBui]], 0, 0, implicit-def $nzcv
-  ; CHECK-NEXT:   [[CSELWr3:%[0-9]+]]:gpr32 = CSELWr [[COPY4]], [[COPY6]], 0, implicit $nzcv
-  ; CHECK-NEXT:   $wzr = SUBSWri [[LDRBBui]], 0, 0, implicit-def $nzcv
-  ; CHECK-NEXT:   [[CSELWr4:%[0-9]+]]:gpr32 = CSELWr [[COPY4]], [[COPY7]], 0, implicit $nzcv
-  ; CHECK-NEXT:   STRBBui [[CSELWr4]], [[COPY1]], 0 :: (store (s8))
-  ; CHECK-NEXT:   early-clobber %20:gpr64sp = STRBBpost [[CSELWr3]], [[PHI]], 1 :: (store (s8))
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:gpr32all = COPY [[CSELWr2]]
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.3:
+  ; CHECK-NEXT:   successors: %bb.1(0x80000000)
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:gpr32 = PHI [[COPY4]], %bb.1, [[COPY6]], %bb.2
+  ; CHECK-NEXT:   [[PHI2:%[0-9]+]]:gpr32 = PHI [[COPY4]], %bb.1, [[COPY7]], %bb.2
+  ; CHECK-NEXT:   STRBBui [[PHI2]], [[COPY1]], 0 :: (store (s8))
+  ; CHECK-NEXT:   early-clobber %20:gpr64sp = STRBBpost [[PHI1]], [[PHI]], 1 :: (store (s8))
   ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:gpr64all = COPY %20
   ; CHECK-NEXT:   B %bb.1
   bb.0:
diff --git a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
index c8c1e9007c7a0f5..aa54c7441de3a74 100644
--- a/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
+++ b/llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -252,17 +252,16 @@ define void @flag_setting() {
 ; CHECK-NEXT:    ldr x9, [x8]
 ; CHECK-NEXT:    ldr x10, [x10]
 ; CHECK-NEXT:    tst x9, x10
-; CHECK-NEXT:    b.gt .LBB2_4
+; CHECK-NEXT:    b.gt .LBB2_2
 ; CHECK-NEXT:  // %bb.1: // %test2
+; CHECK-NEXT:    and x11, x9, x10, asr #12
 ; CHECK-NEXT:    tst x9, x10, lsl #63
-; CHECK-NEXT:    b.lt .LBB2_4
-; CHECK-NEXT:  // %bb.2: // %test3
-; CHECK-NEXT:    and x10, x9, x10, asr #12
-; CHECK-NEXT:    cmp x10, #1
-; CHECK-NEXT:    b.ge .LBB2_4
-; CHECK-NEXT:  // %bb.3: // %other_exit
+; CHECK-NEXT:    ccmp x11, #1, #0, ge
+; CHECK-NEXT:    b.lt .LBB2_3
+; CHECK-NEXT:  .LBB2_2: // %common.ret
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_3: // %other_exit
 ; CHECK-NEXT:    str x9, [x8]
-; CHECK-NEXT:  .LBB2_4: // %common.ret
 ; CHECK-NEXT:    ret
   %val1 = load i64, ptr @var1_64
   %val2 = load i64, ptr @var2_64
diff --git a/llvm/test/CodeGen/AArch64/machine_cse.ll b/llvm/test/CodeGen/AArch64/machine_cse.ll
index 6478f5a37f7826f..d7982670e5678e0 100644
--- a/llvm/test/CodeGen/AArch64/machine_cse.ll
+++ b/llvm/test/CodeGen/AArch64/machine_cse.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc < %s -mtriple=aarch64-linux-gnuabi -O2 -tail-dup-placement=0 | FileCheck %s
 ; -tail-dup-placement causes tail duplication during layout. This breaks the
 ; assumptions of the test case as written (specifically, it creates an
@@ -12,10 +13,6 @@
 @e = external global i32
 
 define i32 @combine-sign-comparisons-by-cse(ptr %arg) {
-; CHECK: cmp
-; CHECK: b.ge
-; CHECK-NOT: cmp
-; CHECK: b.le
 
 entry:
   %a = load i32, ptr @a, align 4
@@ -50,10 +47,11 @@ return:
 
 define void @combine_vector_zeros(ptr %p, ptr %q) {
 ; CHECK-LABEL: combine_vector_zeros:
-; CHECK: movi v[[REG:[0-9]+]].2d, #0
-; CHECK-NOT: movi
-; CHECK: str d[[REG]], [x0]
-; CHECK: str q[[REG]], [x1]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
 entry:
   store <8 x i8> zeroinitializer, ptr %p
   store <16 x i8> zeroinitializer, ptr %q
@@ -62,10 +60,11 @@ entry:
 
 define void @combine_vector_ones(ptr %p, ptr %q) {
 ; CHECK-LABEL: combine_vector_ones:
-; CHECK: movi v[[REG:[0-9]+]].2d, #0xffffffffffffffff
-; CHECK-NOT: movi
-; CHECK: str d[[REG]], [x0]
-; CHECK: str q[[REG]], [x1]
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    movi v0.2d, #0xffffffffffffffff
+; CHECK-NEXT:    str d0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
 entry:
   store <2 x i32> <i32 -1, i32 -1>, ptr %p
   store <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, ptr %q
diff --git a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
index 706aa4ad1b46653..b6f91faeb639ab9 100644
--- a/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
+++ b/llvm/test/CodeGen/AArch64/neon-dotreduce.ll
@@ -2018,153 +2018,153 @@ define i32 @test_sdot_v33i8_double_nomla(<33 x i8> %a, <33 x i8> %b, <33 x i8> %
 ; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w29, -16
-; CHECK-NEXT:    ldr b1, [sp, #80]
-; CHECK-NEXT:    add x8, sp, #88
-; CHECK-NEXT:    ldr b2, [sp, #144]
-; CHECK-NEXT:    add x9, sp, #152
+; CHECK-NEXT:    ldr b0, [sp, #80]
+; CHECK-NEXT:    fmov s2, w0
+; CHECK-NEXT:    add x10, sp, #88
 ; CHECK-NEXT:    ldr b3, [sp, #16]
-; CHECK-NEXT:    add x12, sp, #32
-; CHECK-NEXT:    ld1 { v1.b }[1], [x8]
-; CHECK-NEXT:    ld1 { v2.b }[1], [x9]
+; CHECK-NEXT:    ldr b1, [sp, #144]
+; CHECK-NEXT:    add x11, sp, #24
+; CHECK-NEXT:    ld1 { v0.b }[1], [x10]
+; CHECK-NEXT:    ldr b5, [sp, #480]
+; CHECK-NEXT:    add x10, sp, #152
+; CHECK-NEXT:    ld1 { v3.b }[1], [x11]
+; CHECK-NEXT:    add x11, sp, #488
+; CHECK-NEXT:    ldr b6, [sp, #544]
+; CHECK-NEXT:    mov v2.b[1], w1
 ; CHECK-NEXT:    add x9, sp, #96
-; CHECK-NEXT:    add x8, sp, #24
-; CHECK-NEXT:    add x11, sp, #112
-; CHECK-NEXT:    fmov s0, w0
-; CHECK-NEXT:    ld1 { v3.b }[1], [x8]
-; CHECK-NEXT:    add x8, sp, #160
-; CHECK-NEXT:    ldr b4, [sp, #480]
-; CHECK-NEXT:    ld1 { v1.b }[2], [x9]
-; CHECK-NEXT:    add x9, sp, #104
-; CHECK-NEXT:    ld1 { v2.b }[2], [x8]
-; CHECK-NEXT:    add x8, sp, #168
-; CHECK-NEXT:    add x10, sp, #120
-; CHECK-NEXT:    add x13, sp, #48
-; CHECK-NEXT:    ld1 { v3.b }[2], [x12]
-; CHECK-NEXT:    add x12, sp, #40
-; CHECK-NEXT:    ldr b5, [sp, #608]
-; CHECK-NEXT:    ld1 { v1.b }[3], [x9]
-; CHECK-NEXT:    ld1 { v2.b }[3], [x8]
-; CHECK-NEXT:    mov v0.b[1], w1
-; CHECK-NEXT:    add x9, sp, #128
-; CHECK-NEXT:    add x14, sp, #184
-; CHECK-NEXT:    ldr b16, [sp, #544]
-; CHECK-NEXT:    ld1 { v3.b }[3], [x12]
-; CHECK-NEXT:    add x12, sp, #176
-; CHECK-NEXT:    ldr b17, [sp, #672]
+; CHECK-NEXT:    ld1 { v1.b }[1], [x10]
+; CHECK-NEXT:    ld1 { v5.b }[1], [x11]
+; CHECK-NEXT:    add x11, sp, #552
+; CHECK-NEXT:    ldr b7, [sp, #672]
+; CHECK-NEXT:    ld1 { v6.b }[1], [x11]
+; CHECK-NEXT:    add x11, sp, #680
+; CHECK-NEXT:    ld1 { v0.b }[2], [x9]
+; CHECK-NEXT:    ld1 { v7.b }[1], [x11]
+; CHECK-NEXT:    add x11, sp, #160
+; CHECK-NEXT:    add x8, sp, #104
+; CHECK-NEXT:    ld1 { v1.b }[2], [x11]
+; CHECK-NEXT:    add x11, sp, #32
+; CHECK-NEXT:    mov v2.b[2], w2
+; CHECK-NEXT:    ldr b4, [sp, #608]
+; CHECK-NEXT:    ld1 { v3.b }[2], [x11]
+; CHECK-NEXT:    add x11, sp, #496
+; CHECK-NEXT:    ld1 { v0.b }[3], [x8]
+; CHECK-NEXT:    add x10, sp, #616
+; CHECK-NEXT:    ld1 { v5.b }[2], [x11]
+; CHECK-NEXT:    add x11, sp, #168
+; CHECK-NEXT:    ld1 { v4.b }[1], [x10]
+; CHECK-NEXT:    add x10, sp, #112
+; CHECK-NEXT:    add x8, sp, #40
+; CHECK-NEXT:    ld1 { v1.b }[3], [x11]
+; CHECK-NEXT:    mov v2.b[3], w3
+; CHECK-NEXT:    ld1 { v3.b }[3], [x8]
+; CHECK-NEXT:    ld1 { v0.b }[4], [x10]
+; CHECK-NEXT:    add x11, sp, #176
+; CHECK-NEXT:    add x8, sp, #624
+; CHECK-NEXT:    add x12, sp, #120
+; CHECK-NEXT:    ldr b16, [sp, #208]
+; CHECK-NEXT:    ld1 { v4.b }[2], [x8]
+; CHECK-NEXT:    add x8, sp, #48
 ; CHECK-NEXT:    ld1 { v1.b }[4], [x11]
-; CHECK-NEXT:    add x11, sp, #488
-; CHECK-NEXT:    ld1 { v2.b }[4], [x12]
-; CHECK-NEXT:    ld1 { v4.b }[1], [x11]
-; CHECK-NEXT:    mov v0.b[2], w2
-; CHECK-NEXT:    add x11, sp, #192
-; CHECK-NEXT:    ld1 { v3.b }[4], [x13]
-; CHECK-NEXT:    add x13, sp, #616
-; CHECK-NEXT:    add x12, sp, #56
-; CHECK-NEXT:    ld1 { v1.b }[5], [x10]
-; CHECK-NEXT:    ld1 { v5.b }[1], [x13]
-; CHECK-NEXT:    add x13, sp, #496
-; CHECK-NEXT:    ld1 { v4.b }[2], [x13]
-; CHECK-NEXT:    ld1 { v2.b }[5], [x14]
-; CHECK-NEXT:    add x14, sp, #680
-; CHECK-NEXT:    ld1 { v17.b }[1], [x14]
-; CHECK-NEXT:    add x13, sp, #504
-; CHECK-NEXT:    ld1 { v3.b }[5], [x12]
-; CHECK-NEXT:    ld1 { v1.b }[6], [x9]
-; CHECK-NEXT:    add x9, sp, #552
-; CHECK-NEXT:    add x12, sp, #688
-; CHECK-NEXT:    ld1 { v16.b }[1], [x9]
-; CHECK-NEXT:    add x9, sp, #624
-; CHECK-NEXT:    ld1 { v4.b }[3], [x13]
-; CHECK-NEXT:    ld1 { v2.b }[6], [x11]
+; CHECK-NEXT:    ld1 { v3.b }[4], [x8]
+; CHECK-NEXT:    ld1 { v0.b }[5], [x12]
+; CHECK-NEXT:    add x15, sp, #184
+; CHECK-NEXT:    add x9, sp, #128
+; CHECK-NEXT:    mov v2.b[4], w4
+; CHECK-NEXT:    add x14, sp, #504
+; CHECK-NEXT:    add x11, sp, #56
+; CHECK-NEXT:    ld1 { v1.b }[5], [x15]
+; CHECK-NEXT:    ld1 { v5.b }[3], [x14]
+; CHECK-NEXT:    sshll v18.8h, v16.8b, #0
+; CHECK-NEXT:    ld1 { v3.b }[5], [x11]
 ; CHECK-NEXT:    add x11, sp, #560
-; CHECK-NEXT:    add x8, sp, #136
-; CHECK-NEXT:    ld1 { v17.b }[2], [x12]
-; CHECK-NEXT:    ld1 { v5.b }[2], [x9]
-; CHECK-NEXT:    ld1 { v1.b }[7], [x8]
-; CHECK-NEXT:    ld1 { v16.b }[2], [x11]
-; CHECK-NEXT:    add x8, sp, #512
-; CHECK-NEXT:    mov v0.b[3], w3
-; CHECK-NEXT:    ld1 { v4.b }[4], [x8]
-; CHECK-NEXT:    add x8, sp, #568
-; CHECK-NEXT:    add x9, sp, #696
-; CHECK-NEXT:    add x11, sp, #632
-; CHECK-NEXT:    ld1 { v17.b }[3], [x9]
-; CHECK-NEXT:    add x9, sp, #520
-; CHECK-NEXT:    ld1 { v16.b }[3], [x8]
-; CHECK-NEXT:    ld1 { v5.b }[3], [x11]
-; CHECK-NEXT:    add x8, sp, #640
-; CHECK-NEXT:    ld1 { v4.b }[5], [x9]
-; CHECK-NEXT:    add x9, sp, #576
+; CHECK-NEXT:    ld1 { v0.b }[6], [x9]
+; CHECK-NEXT:    add x9, sp, #688
+; CHECK-NEXT:    add x14, sp, #192
+; CHECK-NEXT:    ld1 { v6.b }[2], [x11]
+; CHECK-NEXT:    ld1 { v7.b }[2], [x9]
+; CHECK-NEXT:    movi v17.2d, #0000000000000000
+; CHECK-NEXT:    add x12, sp, #512
+; CHECK-NEXT:    ld1 { v1.b }[6], [x14]
+; CHECK-NEXT:    mov v2.b[5], w5
+; CHECK-NEXT:    add x15, sp, #568
+; CHECK-NEXT:    sshll v18.4s, v18.4h, #0
+; CHECK-NEXT:    add x14, sp, #696
+; CHECK-NEXT:    ld1 { v5.b }[4], [x12]
+; CHECK-NEXT:    add x9, sp, #632
+; CHECK-NEXT:    add x10, sp, #200
+; CHECK-NEXT:    ld1 { v6.b }[3], [x15]
+; CHECK-NEXT:    ld1 { v7.b }[3], [x14]
+; CHECK-NEXT:    ld1 { v4.b }[3], [x9]
+; CHECK-NEXT:    ld1 { v1.b }[7], [x10]
+; CHECK-NEXT:    add x10, sp, #520
+; CHECK-NEXT:    mov v17.s[0], v18.s[0]
+; CHECK-NEXT:    add x9, sp, #640
+; CHECK-NEXT:    ld1 { v5.b }[5], [x10]
+; CHECK-NEXT:    add x10, sp, #576
 ; CHECK-NEXT:    add x11, sp, #704
 ; CHECK-NEXT:    ldr b18, [sp, #736]
-; CHECK-NEXT:    mov v0.b[4], w4
-; CHECK-NEXT:    ld1 { v17.b }[4], [x11]
-; CHECK-NEXT:    ld1 { v16.b }[4], [x9]
-; CHECK-NEXT:    ld1 { v5.b }[4], [x8]
-; CHECK-NEXT:    add x9, sp, #528
+; CHECK-NEXT:    mov v2.b[6], w6
+; CHECK-NEXT:    ld1 { v6.b }[4], [x10]
+; CHECK-NEXT:    ld1 { v7.b }[4], [x11]
+; CHECK-NEXT:    ld1 { v4.b }[4], [x9]
+; CHECK-NEXT:    add x10, sp, #528
 ; CHECK-NEXT:    sshll v18.8h, v18.8b, #0
-; CHECK-NEXT:    add x8, sp, #648
+; CHECK-NEXT:    add x9, sp, #648
 ; CHECK-NEXT:    add x11, sp, #584
 ; CHECK-NEXT:    add x12, sp, #712
-; CHECK-NEXT:    ld1 { v4.b }[6], [x9]
-; CHECK-NEXT:    movi v7.2d, #0000000000000000
-; CHECK-NEXT:    ld1 { v16.b }[5], [x11]
-; CHECK-NEXT:    ld1 { v17.b }[5], [x12]
-; CHECK-NEXT:    ld1 { v5.b }[5], [x8]
-; CHECK-NEXT:    mov v0.b[5], w5
-; CHECK-NEXT:    add x9, sp, #536
+; CHECK-NEXT:    ld1 { v5.b }[6], [x10]
+; CHECK-NEXT:    movi v16.2d, #0000000000000000
+; CHECK-NEXT:    ld1 { v6.b }[5], [x11]
+; CHECK-NEXT:    ld1 { v7.b }[5], [x12]
+; CHECK-NEXT:    ld1 { v4.b }[5], [x9]
+; CHECK-NEXT:    add x16, sp, #64
+; CHECK-NEXT:    mov v2.b[7], w7
+; CHECK-NEXT:    add x10, sp, #536
 ; CHECK-NEXT:    sshll v18.4s, v18.4h, #0
-; CHECK-NEXT:    add x8, sp, #656
+; CHECK-NEXT:    ld1 { v3.b }[6], [x16]
+; CHECK-NEXT:    add x9, sp, #656
 ; CHECK-NEXT:    add x11, sp, #592
 ; CHECK-NEXT:    add x12, sp, #720
-; CHECK-NEXT:    ld1 { v4.b }[7], [x9]
-; CHECK-NEXT:    ld1 { v16.b }[6], [x11]
-; CHECK-NEXT:    ld1 { v17.b }[6], [x12]
-; CHECK-NEXT:    ld1 { v5.b }[6], [x8]
-; CHECK-NEXT:    ldr b6, [sp, #208]
-; CHECK-NEXT:    add x10, sp, #64
-; CHECK-NEXT:    mov v7.s[0], v18.s[0]
-; CHECK-NEXT:    mov v0.b[6], w6
-; CHECK-NEXT:    ld1 { v3.b }[6], [x10]
+; CHECK-NEXT:    ld1 { v5.b }[7], [x10]
+; CHECK-NEXT:    ld1 { v6.b }[6], [x11]
+; CHECK-NEXT:    ld1 { v7.b }[6], [x12]
+; CHECK-NEXT:    ld1 { v4.b }[6], [x9]
+; CHECK-NEXT:    add x8, sp, #72
+; CHECK-NEXT:    mov v16.s[0], v18.s[0]
+; CHECK-NEXT:    add x13, sp, #136
+; CHECK-NEXT:    sshll v2.8h, v2.8b, #0
+; CHECK-NEXT:    ld1 { v3.b }[7], [x8]
 ; CHECK-NEXT:    add x8, sp, #664
 ; CHECK-NEXT:    add x9, sp, #600
 ; CHECK-NEXT:    add x10, sp, #728
-; CHECK-NEXT:    sshll v4.8h, v4.8b, #0
-; CHECK-NEXT:    sshll v6.8h, v6.8b, #0
-; CHECK-NEXT:    ld1 { v16.b }[7], [x9]
-; CHECK-NEXT:    ld1 { v17.b }[7], [x10]
-; CHECK-NEXT:    ld1 { v5.b }[7], [x8]
-; CHECK-NEXT:    movi v18.2d, #0000000000000000
-; CHECK-NEXT:    mov v0.b[7], w7
-; CHECK-NEXT:    add x9, sp, #200
-; CHECK-NEXT:    add x10, sp, #72
-; CHECK-NEXT:    saddw v7.4s, v7.4s, v4.4h
-; CHECK-NEXT:    sshll v6.4s, v6.4h, #0
-; CHECK-NEXT:    sshll v16.8h, v16.8b, #0
-; CHECK-NEXT:    sshll v17.8h, v17.8b, #0
 ; CHECK-NEXT:    sshll v5.8h, v5.8b, #0
-; CHECK-NEXT:    ld1 { v2.b }[7], [x9]
-; CHECK-NEXT:    ld1 { v3.b }[7], [x10]
+; CHECK-NEXT:    ld1 { v0.b }[7], [x13]
+; CHECK-NEXT:    ld1 { v6.b }[7], [x9]
+; CHECK-NEXT:    ld1 { v7.b }[7], [x10]
+; CHECK-NEXT:    ld1 { v4.b }[7], [x8]
+; CHECK-NEXT:    saddw v17.4s, v17.4s, v2.4h
 ; CHECK-NEXT:    sshll v1.8h, v1.8b, #0
-; CHECK-NEXT:    mov v18.s[0], v6.s[0]
-; CHECK-NEXT:    sshll v0.8h, v0.8b, #0
-; CHECK-NEXT:    saddl2 v6.4s, v17.8h, v16.8h
-; CHECK-NEXT:    saddl2 v4.4s, v5.8h, v4.8h
-; CHECK-NEXT:    saddl v16.4s, v17.4h, v16.4h
-; CHECK-NEXT:    saddw v5.4s, v7.4s, v5.4h
-; CHECK-NEXT:    sshll v2.8h, v2.8b, #0
 ; CHECK-NEXT:    sshll v3.8h, v3.8b, #0
-; CHECK-NEXT:    saddl2 v17.4s, v0.8h, v1.8h
-; CHECK-NEXT:    saddw v0.4s, v18.4s, v0.4h
-; CHECK-NEXT:    saddl2 v7.4s, v3.8h, v2.8h
-; CHECK-NEXT:    add v4.4s, v4.4s, v6.4s
-; CHECK-NEXT:    saddl v2.4s, v3.4h, v2.4h
-; CHECK-NEXT:    add v5.4s, v5.4s, v16.4s
-; CHECK-NEXT:    saddw v0.4s, v0.4s, v1.4h
-; CHECK-NEXT:    add v6.4s, v17.4s, v7.4s
-; CHECK-NEXT:    add v1.4s, v5.4s, v4.4s
+; CHECK-NEXT:    saddw v16.4s, v16.4s, v5.4h
+; CHECK-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-NEXT:    sshll v6.8h, v6.8b, #0
+; CHECK-NEXT:    sshll v7.8h, v7.8b, #0
+; CHECK-NEXT:    sshll v4.8h, v4.8b, #0
+; CHECK-NEXT:    saddl2 v18.4s, v3.8h, v1.8h
+; CHECK-NEXT:    saddl v1.4s, v3.4h, v1.4h
+; CHECK-NEXT:    saddl2 v2.4s, v2.8h, v0.8h
+; CHECK-NEXT:    saddw v0.4s, v17.4s, v0.4h
+; CHECK-NEXT:    saddl2 v3.4s, v7.8h, v6.8h
+; CHECK-NEXT:    saddl v6.4s, v7.4h, v6.4h
+; CHECK-NEXT:    saddl2 v5.4s, v4.8h, v5.8h
+; CHECK-NEXT:    saddw v4.4s, v16.4s, v4.4h
+; CHECK-NEXT:    add v2.4s, v2.4s, v18.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    add v1.4s, v5.4s, v3.4s
+; CHECK-NEXT:    add v3.4s, v4.4s, v6.4s
 ; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
-; CHECK-NEXT:    add v1.4s, v6.4s, v1.4s
+; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
 ; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
 ; CHECK-NEXT:    addv s0, v0.4s
 ; CHECK-NEXT:    fmov w0, s0
diff --git a/llvm/test/CodeGen/AArch64/tbl-loops.ll b/llvm/test/CodeGen/AArch64/tbl-loops.ll
index 365fe03ab0b0844..9a0e670d208ffc0 100644
--- a/llvm/test/CodeGen/AArch64/tbl-loops.ll
+++ b/llvm/test/CodeGen/AArch64/tbl-loops.ll
@@ -145,50 +145,19 @@ define void @loop2(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
 ; CHECK-LABEL: loop2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs w8, w2, #1
-; CHECK-NEXT:    b.lt .LBB1_7
+; CHECK-NEXT:    b.lt .LBB1_9
 ; CHECK-NEXT:  // %bb.1: // %for.body.preheader
 ; CHECK-NEXT:    cmp w8, #2
-; CHECK-NEXT:    b.ls .LBB1_4
+; CHECK-NEXT:    b.ls .LBB1_6
 ; CHECK-NEXT:  // %bb.2: // %vector.memcheck
 ; CHECK-NEXT:    ubfiz x9, x8, #1, #32
 ; CHECK-NEXT:    add x9, x9, #2
 ; CHECK-NEXT:    add x10, x1, x9, lsl #2
-; CHECK-NEXT:    cmp x10, x0
-; CHECK-NEXT:    b.ls .LBB1_8
-; CHECK-NEXT:  // %bb.3: // %vector.memcheck
 ; CHECK-NEXT:    add x9, x0, x9
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    b.ls .LBB1_8
-; CHECK-NEXT:  .LBB1_4:
-; CHECK-NEXT:    mov w10, wzr
-; CHECK-NEXT:    mov x8, x1
-; CHECK-NEXT:    mov x9, x0
-; CHECK-NEXT:  .LBB1_5: // %for.body.preheader1
-; CHECK-NEXT:    movi d0, #0000000000000000
-; CHECK-NEXT:    mov w11, #1132396544 // =0x437f0000
-; CHECK-NEXT:    sub w10, w2, w10
-; CHECK-NEXT:    fmov s1, w11
-; CHECK-NEXT:  .LBB1_6: // %for.body
-; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    ldp s2, s3, [x8], #8
-; CHECK-NEXT:    fcmp s2, s1
-; CHECK-NEXT:    fcsel s4, s1, s2, gt
-; CHECK-NEXT:    fcmp s2, #0.0
-; CHECK-NEXT:    fcsel s2, s0, s4, mi
-; CHECK-NEXT:    fcmp s3, s1
-; CHECK-NEXT:    fcsel s4, s1, s3, gt
-; CHECK-NEXT:    fcmp s3, #0.0
-; CHECK-NEXT:    fcvtzs w11, s2
-; CHECK-NEXT:    fcsel s3, s0, s4, mi
-; CHECK-NEXT:    subs w10, w10, #1
-; CHECK-NEXT:    strb w11, [x9]
-; CHECK-NEXT:    fcvtzs w12, s3
-; CHECK-NEXT:    strb w12, [x9, #1]
-; CHECK-NEXT:    add x9, x9, #2
-; CHECK-NEXT:    b.ne .LBB1_6
-; CHECK-NEXT:  .LBB1_7: // %for.cond.cleanup
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB1_8: // %vector.ph
+; CHECK-NEXT:    cmp x10, x0
+; CHECK-NEXT:    ccmp x9, x1, #0, hi
+; CHECK-NEXT:    b.hi .LBB1_6
+; CHECK-NEXT:  // %bb.3: // %vector.ph
 ; CHECK-NEXT:    add x11, x8, #1
 ; CHECK-NEXT:    mov w8, #1132396544 // =0x437f0000
 ; CHECK-NEXT:    and x10, x11, #0x1fffffffc
@@ -196,7 +165,7 @@ define void @loop2(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
 ; CHECK-NEXT:    add x8, x1, x10, lsl #3
 ; CHECK-NEXT:    add x9, x0, x10, lsl #1
 ; CHECK-NEXT:    mov x12, x10
-; CHECK-NEXT:  .LBB1_9: // %vector.body
+; CHECK-NEXT:  .LBB1_4: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ld2 { v1.4s, v2.4s }, [x1], #32
 ; CHECK-NEXT:    subs x12, x12, #4
@@ -214,11 +183,40 @@ define void @loop2(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
 ; CHECK-NEXT:    xtn v1.4h, v1.4s
 ; CHECK-NEXT:    trn1 v1.8b, v2.8b, v1.8b
 ; CHECK-NEXT:    str d1, [x0], #8
-; CHECK-NEXT:    b.ne .LBB1_9
-; CHECK-NEXT:  // %bb.10: // %middle.block
+; CHECK-NEXT:    b.ne .LBB1_4
+; CHECK-NEXT:  // %bb.5: // %middle.block
 ; CHECK-NEXT:    cmp x11, x10
-; CHECK-NEXT:    b.ne .LBB1_5
-; CHECK-NEXT:    b .LBB1_7
+; CHECK-NEXT:    b.ne .LBB1_7
+; CHECK-NEXT:    b .LBB1_9
+; CHECK-NEXT:  .LBB1_6:
+; CHECK-NEXT:    mov w10, wzr
+; CHECK-NEXT:    mov x8, x1
+; CHECK-NEXT:    mov x9, x0
+; CHECK-NEXT:  .LBB1_7: // %for.body.preheader1
+; CHECK-NEXT:    movi d0, #0000000000000000
+; CHECK-NEXT:    mov w11, #1132396544 // =0x437f0000
+; CHECK-NEXT:    sub w10, w2, w10
+; CHECK-NEXT:    fmov s1, w11
+; CHECK-NEXT:  .LBB1_8: // %for.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldp s2, s3, [x8], #8
+; CHECK-NEXT:    fcmp s2, s1
+; CHECK-NEXT:    fcsel s4, s1, s2, gt
+; CHECK-NEXT:    fcmp s2, #0.0
+; CHECK-NEXT:    fcsel s2, s0, s4, mi
+; CHECK-NEXT:    fcmp s3, s1
+; CHECK-NEXT:    fcsel s4, s1, s3, gt
+; CHECK-NEXT:    fcmp s3, #0.0
+; CHECK-NEXT:    fcvtzs w11, s2
+; CHECK-NEXT:    fcsel s3, s0, s4, mi
+; CHECK-NEXT:    subs w10, w10, #1
+; CHECK-NEXT:    strb w11, [x9]
+; CHECK-NEXT:    fcvtzs w12, s3
+; CHECK-NEXT:    strb w12, [x9, #1]
+; CHECK-NEXT:    add x9, x9, #2
+; CHECK-NEXT:    b.ne .LBB1_8
+; CHECK-NEXT:  .LBB1_9: // %for.cond.cleanup
+; CHECK-NEXT:    ret
 entry:
   %cmp19 = icmp sgt i32 %width, 0
   br i1 %cmp19, label %for.body.preheader, label %for.cond.cleanup
@@ -529,64 +527,19 @@ define void @loop4(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
 ; CHECK-LABEL: loop4:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    subs w8, w2, #1
-; CHECK-NEXT:    b.lt .LBB3_7
+; CHECK-NEXT:    b.lt .LBB3_9
 ; CHECK-NEXT:  // %bb.1: // %for.body.preheader
 ; CHECK-NEXT:    cmp w8, #2
-; CHECK-NEXT:    b.ls .LBB3_4
+; CHECK-NEXT:    b.ls .LBB3_6
 ; CHECK-NEXT:  // %bb.2: // %vector.memcheck
 ; CHECK-NEXT:    ubfiz x9, x8, #2, #32
 ; CHECK-NEXT:    add x9, x9, #4
 ; CHECK-NEXT:    add x10, x1, x9, lsl #2
-; CHECK-NEXT:    cmp x10, x0
-; CHECK-NEXT:    b.ls .LBB3_8
-; CHECK-NEXT:  // %bb.3: // %vector.memcheck
 ; CHECK-NEXT:    add x9, x0, x9
-; CHECK-NEXT:    cmp x9, x1
-; CHECK-NEXT:    b.ls .LBB3_8
-; CHECK-NEXT:  .LBB3_4:
-; CHECK-NEXT:    mov w10, wzr
-; CHECK-NEXT:    mov x8, x1
-; CHECK-NEXT:    mov x9, x0
-; CHECK-NEXT:  .LBB3_5: // %for.body.preheader1
-; CHECK-NEXT:    movi d0, #0000000000000000
-; CHECK-NEXT:    mov w11, #1132396544 // =0x437f0000
-; CHECK-NEXT:    sub w10, w2, w10
-; CHECK-NEXT:    fmov s1, w11
-; CHECK-NEXT:  .LBB3_6: // %for.body
-; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    ldp s2, s3, [x8]
-; CHECK-NEXT:    fcmp s2, s1
-; CHECK-NEXT:    fcsel s4, s1, s2, gt
-; CHECK-NEXT:    fcmp s2, #0.0
-; CHECK-NEXT:    fcsel s2, s0, s4, mi
-; CHECK-NEXT:    fcmp s3, s1
-; CHECK-NEXT:    fcsel s4, s1, s3, gt
-; CHECK-NEXT:    fcmp s3, #0.0
-; CHECK-NEXT:    ldp s3, s5, [x8, #8]
-; CHECK-NEXT:    fcvtzs w11, s2
-; CHECK-NEXT:    add x8, x8, #16
-; CHECK-NEXT:    fcsel s4, s0, s4, mi
-; CHECK-NEXT:    fcmp s3, s1
-; CHECK-NEXT:    strb w11, [x9]
-; CHECK-NEXT:    fcsel s6, s1, s3, gt
-; CHECK-NEXT:    fcmp s3, #0.0
-; CHECK-NEXT:    fcvtzs w12, s4
-; CHECK-NEXT:    fcsel s3, s0, s6, mi
-; CHECK-NEXT:    fcmp s5, s1
-; CHECK-NEXT:    strb w12, [x9, #1]
-; CHECK-NEXT:    fcsel s6, s1, s5, gt
-; CHECK-NEXT:    fcmp s5, #0.0
-; CHECK-NEXT:    fcvtzs w13, s3
-; CHECK-NEXT:    fcsel s5, s0, s6, mi
-; CHECK-NEXT:    subs w10, w10, #1
-; CHECK-NEXT:    strb w13, [x9, #2]
-; CHECK-NEXT:    fcvtzs w14, s5
-; CHECK-NEXT:    strb w14, [x9, #3]
-; CHECK-NEXT:    add x9, x9, #4
-; CHECK-NEXT:    b.ne .LBB3_6
-; CHECK-NEXT:  .LBB3_7: // %for.cond.cleanup
-; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB3_8: // %vector.ph
+; CHECK-NEXT:    cmp x10, x0
+; CHECK-NEXT:    ccmp x9, x1, #0, hi
+; CHECK-NEXT:    b.hi .LBB3_6
+; CHECK-NEXT:  // %bb.3: // %vector.ph
 ; CHECK-NEXT:    add x11, x8, #1
 ; CHECK-NEXT:    mov w8, #1132396544 // =0x437f0000
 ; CHECK-NEXT:    adrp x12, .LCPI3_0
@@ -596,7 +549,7 @@ define void @loop4(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
 ; CHECK-NEXT:    add x8, x1, x10, lsl #4
 ; CHECK-NEXT:    add x9, x0, x10, lsl #2
 ; CHECK-NEXT:    mov x12, x10
-; CHECK-NEXT:  .LBB3_9: // %vector.body
+; CHECK-NEXT:  .LBB3_4: // %vector.body
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    ld4 { v2.4s, v3.4s, v4.4s, v5.4s }, [x1], #64
 ; CHECK-NEXT:    subs x12, x12, #4
@@ -626,11 +579,54 @@ define void @loop4(ptr noalias nocapture noundef writeonly %dst, ptr nocapture n
 ; CHECK-NEXT:    xtn v19.4h, v2.4s
 ; CHECK-NEXT:    tbl v2.16b, { v16.16b, v17.16b, v18.16b, v19.16b }, v1.16b
 ; CHECK-NEXT:    str q2, [x0], #16
-; CHECK-NEXT:    b.ne .LBB3_9
-; CHECK-NEXT:  // %bb.10: // %middle.block
+; CHECK-NEXT:    b.ne .LBB3_4
+; CHECK-NEXT:  // %bb.5: // %middle.block
 ; CHECK-NEXT:    cmp x11, x10
-; CHECK-NEXT:    b.ne .LBB3_5
-; CHECK-NEXT:    b .LBB3_7
+; CHECK-NEXT:    b.ne .LBB3_7
+; CHECK-NEXT:    b .LBB3_9
+; CHECK-NEXT:  .LBB3_6:
+; CHECK-NEXT:    mov w10, wzr
+; CHECK-NEXT:    mov x8, x1
+; CHECK-NEXT:    mov x9, x0
+; CHECK-NEXT:  .LBB3_7: // %for.body.preheader1
+; CHECK-NEXT:    movi d0, #0000000000000000
+; CHECK-NEXT:    mov w11, #1132396544 // =0x437f0000
+; CHECK-NEXT:    sub w10, w2, w10
+; CHECK-NEXT:    fmov s1, w11
+; CHECK-NEXT:  .LBB3_8: // %for.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ldp s2, s3, [x8]
+; CHECK-NEXT:    fcmp s2, s1
+; CHECK-NEXT:    fcsel s4, s1, s2, gt
+; CHECK-NEXT:    fcmp s2, #0.0
+; CHECK-NEXT:    fcsel s2, s0, s4, mi
+; CHECK-NEXT:    fcmp s3, s1
+; CHECK-NEXT:    fcsel s4, s1, s3, gt
+; CHECK-NEXT:    fcmp s3, #0.0
+; CHECK-NEXT:    ldp s3, s5, [x8, #8]
+; CHECK-NEXT:    fcvtzs w11, s2
+; CHECK-NEXT:    add x8, x8, #16
+; CHECK-NEXT:    fcsel s4, s0, s4, mi
+; CHECK-NEXT:    fcmp s3, s1
+; CHECK-NEXT:    strb w11, [x9]
+; CHECK-NEXT:    fcsel s6, s1, s3, gt
+; CHECK-NEXT:    fcmp s3, #0.0
+; CHECK-NEXT:    fcvtzs w12, s4
+; CHECK-NEXT:    fcsel s3, s0, s6, mi
+; CHECK-NEXT:    fcmp s5, s1
+; CHECK-NEXT:    strb w12, [x9, #1]
+; CHECK-NEXT:    fcsel s6, s1, s5, gt
+; CHECK-NEXT:    fcmp s5, #0.0
+; CHECK-NEXT:    fcvtzs w13, s3
+; CHECK-NEXT:    fcsel s5, s0, s6, mi
+; CHECK-NEXT:    subs w10, w10, #1
+; CHECK-NEXT:    strb w13, [x9, #2]
+; CHECK-NEXT:    fcvtzs w14, s5
+; CHECK-NEXT:    strb w14, [x9, #3]
+; CHECK-NEXT:    add x9, x9, #4
+; CHECK-NEXT:    b.ne .LBB3_8
+; CHECK-NEXT:  .LBB3_9: // %for.cond.cleanup
+; CHECK-NEXT:    ret
 entry:
   %cmp39 = icmp sgt i32 %width, 0
   br i1 %cmp39, label %for.body.preheader, label %for.cond.cleanup
diff --git a/llvm/test/CodeGen/AMDGPU/early-if-convert.ll b/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
index c3f547ccfc06d34..85066dbf006c210 100644
--- a/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
+++ b/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN:  llc -amdgpu-scalarize-global-loads=false  -march=amdgcn -mcpu=verde -amdgpu-early-ifcvt=1 -amdgpu-codegenprepare-break-large-phis=0 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
 ; XUN: llc -march=amdgcn -mcpu=tonga -amdgpu-early-ifcvt=1 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
 
@@ -6,13 +7,25 @@
 
 ; FIXME: This leaves behind a now unnecessary and with exec
 
-; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
-; GCN: v_cmp_neq_f32_e32 vcc, 1.0, [[VAL]]
-; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], [[VAL]], [[VAL]]
-; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], [[ADD]], [[VAL]], vcc
-; GCN: buffer_store_dword [[RESULT]]
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; GCN-LABEL: test_vccnz_ifcvt_triangle:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s4, s2
+; GCN-NEXT:    s_mov_b32 s5, s3
+; GCN-NEXT:    buffer_load_dword v0, off, s[4:7], 0
+; GCN-NEXT:    s_mov_b32 s2, s6
+; GCN-NEXT:    s_mov_b32 s3, s7
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_cmp_neq_f32_e32 vcc, 1.0, v0
+; GCN-NEXT:    v_add_f32_e32 v1, v0, v0
+; GCN-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load float, ptr addrspace(1) %in
   %cc = fcmp oeq float %v, 1.000000e+00
@@ -28,13 +41,34 @@ endif:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_vccnz_ifcvt_diamond:
-; GCN: buffer_load_dword [[VAL:v[0-9]+]]
-; GCN: v_cmp_neq_f32_e32 vcc, 1.0, [[VAL]]
-; GCN-DAG: v_add_f32_e32 [[ADD:v[0-9]+]], [[VAL]], [[VAL]]
-; GCN-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[VAL]], [[VAL]]
-; GCN: buffer_store_dword [[MUL]]
 define amdgpu_kernel void @test_vccnz_ifcvt_diamond(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; GCN-LABEL: test_vccnz_ifcvt_diamond:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s4, s2
+; GCN-NEXT:    s_mov_b32 s5, s3
+; GCN-NEXT:    buffer_load_dword v1, off, s[4:7], 0
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_cmp_neq_f32_e32 vcc, 1.0, v1
+; GCN-NEXT:    s_cbranch_vccz .LBB1_4
+; GCN-NEXT:  ; %bb.1: ; %else
+; GCN-NEXT:    v_mul_f32_e32 v0, v1, v1
+; GCN-NEXT:    s_mov_b64 s[2:3], 0
+; GCN-NEXT:    s_cbranch_execnz .LBB1_3
+; GCN-NEXT:  .LBB1_2: ; %if
+; GCN-NEXT:    v_add_f32_e32 v0, v1, v1
+; GCN-NEXT:  .LBB1_3: ; %endif
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
+; GCN-NEXT:  .LBB1_4:
+; GCN-NEXT:    s_mov_b64 s[2:3], -1
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    s_branch .LBB1_2
 entry:
   %v = load float, ptr addrspace(1) %in
   %cc = fcmp oeq float %v, 1.000000e+00
@@ -54,13 +88,30 @@ endif:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_vcc_clobber:
-; GCN: ; clobber vcc
-; GCN: v_cmp_neq_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
-; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc
-; GCN: s_mov_b64 vcc, [[CMP]]
-; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle_vcc_clobber(ptr addrspace(1) %out, ptr addrspace(1) %in, float %k) #0 {
+; GCN-LABEL: test_vccnz_ifcvt_triangle_vcc_clobber:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xd
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    ;;#ASMSTART
+; GCN-NEXT:    ; clobber vcc
+; GCN-NEXT:    ;;#ASMEND
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s0, s6
+; GCN-NEXT:    s_mov_b32 s1, s7
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
+; GCN-NEXT:    v_cmp_neq_f32_e64 s[0:1], s8, 1.0
+; GCN-NEXT:    s_and_b64 s[0:1], exec, s[0:1]
+; GCN-NEXT:    s_mov_b32 s6, s2
+; GCN-NEXT:    s_mov_b32 s7, s3
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v0, v0
+; GCN-NEXT:    s_mov_b64 vcc, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load i32, ptr addrspace(1) %in
   %cc = fcmp oeq float %k, 1.000000e+00
@@ -78,18 +129,33 @@ endif:
 }
 
 ; Longest chain of cheap instructions to convert
-; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_max_cheap:
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_cndmask_b32_e32
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle_max_cheap(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; GCN-LABEL: test_vccnz_ifcvt_triangle_max_cheap:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s4, s2
+; GCN-NEXT:    s_mov_b32 s5, s3
+; GCN-NEXT:    buffer_load_dword v0, off, s[4:7], 0
+; GCN-NEXT:    s_mov_b32 s2, s6
+; GCN-NEXT:    s_mov_b32 s3, s7
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_cmp_neq_f32_e32 vcc, 1.0, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load float, ptr addrspace(1) %in
   %cc = fcmp oeq float %v, 1.000000e+00
@@ -114,23 +180,35 @@ endif:
 }
 
 ; Short chain of cheap instructions to not convert
-; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_min_expensive:
-; GCN: s_cbranch_vccnz [[ENDIF:.LBB[0-9]+_[0-9]+]]
-
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-; GCN: v_mul_f32
-
-; GCN: [[ENDIF]]:
-; GCN: buffer_store_dword
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle_min_expensive(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; GCN-LABEL: test_vccnz_ifcvt_triangle_min_expensive:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s4, s2
+; GCN-NEXT:    s_mov_b32 s5, s3
+; GCN-NEXT:    buffer_load_dword v0, off, s[4:7], 0
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_cmp_neq_f32_e32 vcc, 1.0, v0
+; GCN-NEXT:    s_cbranch_vccnz .LBB4_2
+; GCN-NEXT:  ; %bb.1: ; %if
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v1, v0, v1
+; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GCN-NEXT:  .LBB4_2: ; %endif
+; GCN-NEXT:    s_mov_b32 s2, s6
+; GCN-NEXT:    s_mov_b32 s3, s7
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load float, ptr addrspace(1) %in
   %cc = fcmp oeq float %v, 1.000000e+00
@@ -156,15 +234,35 @@ endif:
 }
 
 ; Should still branch over fdiv expansion
-; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_expensive:
-; GCN: v_cmp_neq_f32_e32
-; GCN: s_cbranch_vccnz [[ENDIF:.LBB[0-9]+_[0-9]+]]
-
-; GCN: v_div_scale_f32
-
-; GCN: [[ENDIF]]:
-; GCN: buffer_store_dword
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle_expensive(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+; GCN-LABEL: test_vccnz_ifcvt_triangle_expensive:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s4, s2
+; GCN-NEXT:    s_mov_b32 s5, s3
+; GCN-NEXT:    buffer_load_dword v0, off, s[4:7], 0
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_cmp_neq_f32_e32 vcc, 1.0, v0
+; GCN-NEXT:    s_cbranch_vccnz .LBB5_2
+; GCN-NEXT:  ; %bb.1: ; %if
+; GCN-NEXT:    v_div_scale_f32 v1, vcc, v0, v0, v0
+; GCN-NEXT:    v_rcp_f32_e32 v2, v1
+; GCN-NEXT:    v_fma_f32 v3, -v1, v2, 1.0
+; GCN-NEXT:    v_fma_f32 v2, v3, v2, v2
+; GCN-NEXT:    v_mul_f32_e32 v3, v1, v2
+; GCN-NEXT:    v_fma_f32 v4, -v1, v3, v1
+; GCN-NEXT:    v_fma_f32 v3, v4, v2, v3
+; GCN-NEXT:    v_fma_f32 v1, -v1, v3, v1
+; GCN-NEXT:    v_div_fmas_f32 v1, v1, v2, v3
+; GCN-NEXT:    v_div_fixup_f32 v0, v1, v0, v0
+; GCN-NEXT:  .LBB5_2: ; %endif
+; GCN-NEXT:    s_mov_b32 s2, s6
+; GCN-NEXT:    s_mov_b32 s3, s7
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load float, ptr addrspace(1) %in
   %cc = fcmp oeq float %v, 1.000000e+00
@@ -181,15 +279,26 @@ endif:
 }
 
 ; vcc branch with SGPR inputs
-; GCN-LABEL: {{^}}test_vccnz_sgpr_ifcvt_triangle:
-; GCN: v_cmp_neq_f32_e64
-; GCN: s_cbranch_vccnz [[ENDIF:.LBB[0-9]+_[0-9]+]]
-
-; GCN: s_add_i32
-
-; GCN: [[ENDIF]]:
-; GCN: buffer_store_dword
 define amdgpu_kernel void @test_vccnz_sgpr_ifcvt_triangle(ptr addrspace(1) %out, ptr addrspace(4) %in, float %cnd) #0 {
+; GCN-LABEL: test_vccnz_sgpr_ifcvt_triangle:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s1, s[0:1], 0xd
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dword s0, s[6:7], 0x0
+; GCN-NEXT:    v_cmp_neq_f32_e64 s[2:3], s1, 1.0
+; GCN-NEXT:    s_and_b64 vcc, exec, s[2:3]
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b64 vcc, vcc
+; GCN-NEXT:    s_cbranch_vccnz .LBB6_2
+; GCN-NEXT:  ; %bb.1: ; %if
+; GCN-NEXT:    s_add_i32 s0, s0, s0
+; GCN-NEXT:  .LBB6_2: ; %endif
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load i32, ptr addrspace(4) %in
   %cc = fcmp oeq float %cnd, 1.000000e+00
@@ -206,9 +315,22 @@ endif:
 
 }
 
-; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_constant_load:
-; GCN: v_cndmask_b32
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle_constant_load(ptr addrspace(1) %out, ptr addrspace(4) %in) #0 {
+; GCN-LABEL: test_vccnz_ifcvt_triangle_constant_load:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dword s2, s[2:3], 0x0
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_cmp_neq_f32_e64 s[4:5], s2, 1.0
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_add_f32_e64 v1, s2, s2
+; GCN-NEXT:    s_and_b64 vcc, exec, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load float, ptr addrspace(4) %in
   %cc = fcmp oeq float %v, 1.000000e+00
@@ -226,10 +348,21 @@ endif:
 
 ; Due to broken cost heuristic, this is not if converted like
 ; test_vccnz_ifcvt_triangle_constant_load even though it should be.
-
-; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle_argload:
-; GCN: v_cndmask_b32
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle_argload(ptr addrspace(1) %out, float %v) #0 {
+; GCN-LABEL: test_vccnz_ifcvt_triangle_argload:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s2, s[0:1], 0xb
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_cmp_neq_f32_e64 s[4:5], s2, 1.0
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_add_f32_e64 v1, s2, s2
+; GCN-NEXT:    s_and_b64 vcc, exec, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %cc = fcmp oeq float %v, 1.000000e+00
   br i1 %cc, label %if, label %endif
@@ -245,12 +378,21 @@ endif:
 }
 
 ; Scalar branch and scalar inputs
-; GCN-LABEL: {{^}}test_scc1_sgpr_ifcvt_triangle:
-; GCN: s_load_dword [[VAL:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x0
-; GCN: s_add_i32 [[ADD:s[0-9]+]], [[VAL]], [[VAL]]
-; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 1
-; GCN-NEXT: s_cselect_b32 [[SELECT:s[0-9]+]], [[VAL]], [[ADD]]
 define amdgpu_kernel void @test_scc1_sgpr_ifcvt_triangle(ptr addrspace(4) %in, i32 %cond) #0 {
+; GCN-LABEL: test_scc1_sgpr_ifcvt_triangle:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s0, s[0:1], 0xb
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dword s1, s[2:3], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_add_i32 s2, s1, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 1
+; GCN-NEXT:    s_cselect_b32 s0, s1, s2
+; GCN-NEXT:    ;;#ASMSTART
+; GCN-NEXT:    ; reg use s0
+; GCN-NEXT:    ;;#ASMEND
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load i32, ptr addrspace(4) %in
   %cc = icmp eq i32 %cond, 1
@@ -268,15 +410,28 @@ endif:
 
 ; FIXME: Should be able to use VALU compare and select
 ; Scalar branch but VGPR select operands
-; GCN-LABEL: {{^}}test_scc1_vgpr_ifcvt_triangle:
-; GCN: s_cmp_lg_u32
-; GCN: s_cbranch_scc1 [[ENDIF:.LBB[0-9]+_[0-9]+]]
-
-; GCN: v_add_f32_e32
-
-; GCN: [[ENDIF]]:
-; GCN: buffer_store_dword
 define amdgpu_kernel void @test_scc1_vgpr_ifcvt_triangle(ptr addrspace(1) %out, ptr addrspace(1) %in, i32 %cond) #0 {
+; GCN-LABEL: test_scc1_vgpr_ifcvt_triangle:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xd
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s0, s6
+; GCN-NEXT:    s_mov_b32 s1, s7
+; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_cmp_lg_u32 s8, 1
+; GCN-NEXT:    s_cbranch_scc1 .LBB10_2
+; GCN-NEXT:  ; %bb.1: ; %if
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_f32_e32 v0, v0, v0
+; GCN-NEXT:  .LBB10_2: ; %endif
+; GCN-NEXT:    s_mov_b32 s6, s2
+; GCN-NEXT:    s_mov_b32 s7, s3
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load float, ptr addrspace(1) %in
   %cc = icmp eq i32 %cond, 1
@@ -292,12 +447,22 @@ endif:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_scc1_sgpr_ifcvt_triangle64:
-; GCN: s_add_u32
-; GCN: s_addc_u32
-; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 1
-; GCN-NEXT: s_cselect_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @test_scc1_sgpr_ifcvt_triangle64(ptr addrspace(4) %in, i32 %cond) #0 {
+; GCN-LABEL: test_scc1_sgpr_ifcvt_triangle64:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s4, s[0:1], 0xb
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[2:3], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_add_u32 s2, s0, s0
+; GCN-NEXT:    s_addc_u32 s3, s1, s1
+; GCN-NEXT:    s_cmp_lg_u32 s4, 1
+; GCN-NEXT:    s_cselect_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT:    ;;#ASMSTART
+; GCN-NEXT:    ; reg use s[0:1]
+; GCN-NEXT:    ;;#ASMEND
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load i64, ptr addrspace(4) %in
   %cc = icmp eq i32 %cond, 1
@@ -314,15 +479,25 @@ endif:
 }
 
 ; TODO: Can do s_cselect_b64; s_cselect_b32
-; GCN-LABEL: {{^}}test_scc1_sgpr_ifcvt_triangle96:
-; GCN: s_add_i32
-; GCN: s_add_i32
-; GCN: s_add_i32
-; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 1
-; GCN-NEXT: s_cselect_b32 s
-; GCN-NEXT: s_cselect_b32 s
-; GCN-NEXT: s_cselect_b32 s
 define amdgpu_kernel void @test_scc1_sgpr_ifcvt_triangle96(ptr addrspace(4) %in, i32 %cond) #0 {
+; GCN-LABEL: test_scc1_sgpr_ifcvt_triangle96:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s7, s[0:1], 0xb
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[2:3], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_add_i32 s6, s2, s2
+; GCN-NEXT:    s_add_i32 s5, s1, s1
+; GCN-NEXT:    s_add_i32 s4, s0, s0
+; GCN-NEXT:    s_cmp_lg_u32 s7, 1
+; GCN-NEXT:    s_cselect_b32 s0, s0, s4
+; GCN-NEXT:    s_cselect_b32 s1, s1, s5
+; GCN-NEXT:    s_cselect_b32 s2, s2, s6
+; GCN-NEXT:    ;;#ASMSTART
+; GCN-NEXT:    ; reg use s[0:3]
+; GCN-NEXT:    ;;#ASMEND
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load <3 x i32>, ptr addrspace(4) %in
   %cc = icmp eq i32 %cond, 1
@@ -339,15 +514,25 @@ endif:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_scc1_sgpr_ifcvt_triangle128:
-; GCN: s_add_i32
-; GCN: s_add_i32
-; GCN: s_add_i32
-; GCN: s_add_i32
-; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 1
-; GCN-NEXT: s_cselect_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
-; GCN-NEXT: s_cselect_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}
 define amdgpu_kernel void @test_scc1_sgpr_ifcvt_triangle128(ptr addrspace(4) %in, i32 %cond) #0 {
+; GCN-LABEL: test_scc1_sgpr_ifcvt_triangle128:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xb
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[2:3], 0x0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_add_i32 s7, s3, s3
+; GCN-NEXT:    s_add_i32 s6, s2, s2
+; GCN-NEXT:    s_add_i32 s5, s1, s1
+; GCN-NEXT:    s_add_i32 s4, s0, s0
+; GCN-NEXT:    s_cmp_lg_u32 s8, 1
+; GCN-NEXT:    s_cselect_b64 s[0:1], s[0:1], s[4:5]
+; GCN-NEXT:    s_cselect_b64 s[2:3], s[2:3], s[6:7]
+; GCN-NEXT:    ;;#ASMSTART
+; GCN-NEXT:    ; reg use s[0:3]
+; GCN-NEXT:    ;;#ASMEND
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load <4 x i32>, ptr addrspace(4) %in
   %cc = icmp eq i32 %cond, 1
@@ -363,10 +548,19 @@ endif:
   ret void
 }
 
-; GCN-LABEL: {{^}}uniform_if_swap_br_targets_scc_constant_select:
-; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cselect_b32 s{{[0-9]+}}, 0, 1{{$}}
 define amdgpu_kernel void @uniform_if_swap_br_targets_scc_constant_select(i32 %cond, ptr addrspace(1) %out) {
+; GCN-LABEL: uniform_if_swap_br_targets_scc_constant_select:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dword s2, s[0:1], 0x9
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xb
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_cmp_lg_u32 s2, 0
+; GCN-NEXT:    s_cselect_b32 s4, 0, 1
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %cmp0 = icmp eq i32 %cond, 0
   br i1 %cmp0, label %else, label %if
@@ -383,11 +577,17 @@ done:
   ret void
 }
 
-; GCN-LABEL: {{^}}ifcvt_undef_scc:
-; GCN: {{^}}; %bb.0:
-; GCN-NEXT: s_load_dwordx2
-; GCN-NEXT: s_cselect_b32 s{{[0-9]+}}, 0, 1{{$}}
 define amdgpu_kernel void @ifcvt_undef_scc(i32 %cond, ptr addrspace(1) %out) {
+; GCN-LABEL: ifcvt_undef_scc:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xb
+; GCN-NEXT:    s_cselect_b32 s4, 0, 1
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT:    s_endpgm
 entry:
   br i1 undef, label %else, label %if
 
@@ -403,16 +603,38 @@ done:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle256:
-; GCN: v_cmp_neq_f32
-; GCN: s_cbranch_vccnz [[ENDIF:.LBB[0-9]+_[0-9]+]]
-
-; GCN: v_add_i32
-; GCN: v_add_i32
-
-; GCN: [[ENDIF]]:
-; GCN: buffer_store_dword
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle256(ptr addrspace(1) %out, ptr addrspace(1) %in, float %cnd) #0 {
+; GCN-LABEL: test_vccnz_ifcvt_triangle256:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xd
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s0, s6
+; GCN-NEXT:    s_mov_b32 s1, s7
+; GCN-NEXT:    buffer_load_dwordx4 v[0:3], off, s[0:3], 0
+; GCN-NEXT:    buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; GCN-NEXT:    v_cmp_neq_f32_e64 s[0:1], s8, 1.0
+; GCN-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-NEXT:    s_cbranch_vccnz .LBB16_2
+; GCN-NEXT:  ; %bb.1: ; %if
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, v7, v7
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, v6, v6
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v5
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v4
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v3
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v2
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v0
+; GCN-NEXT:  .LBB16_2: ; %endif
+; GCN-NEXT:    s_mov_b32 s6, s2
+; GCN-NEXT:    s_mov_b32 s7, s3
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
+; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load <8 x i32>, ptr addrspace(1) %in
   %cc = fcmp oeq float %cnd, 1.000000e+00
@@ -428,16 +650,50 @@ endif:
   ret void
 }
 
-; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle512:
-; GCN: v_cmp_neq_f32
-; GCN: s_cbranch_vccnz [[ENDIF:.LBB[0-9]+_[0-9]+]]
-
-; GCN: v_add_i32
-; GCN: v_add_i32
-
-; GCN: [[ENDIF]]:
-; GCN: buffer_store_dword
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle512(ptr addrspace(1) %out, ptr addrspace(1) %in, float %cnd) #0 {
+; GCN-LABEL: test_vccnz_ifcvt_triangle512:
+; GCN:       ; %bb.0: ; %entry
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xd
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s0, s6
+; GCN-NEXT:    s_mov_b32 s1, s7
+; GCN-NEXT:    buffer_load_dwordx4 v[0:3], off, s[0:3], 0
+; GCN-NEXT:    buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; GCN-NEXT:    buffer_load_dwordx4 v[8:11], off, s[0:3], 0 offset:32
+; GCN-NEXT:    buffer_load_dwordx4 v[12:15], off, s[0:3], 0 offset:48
+; GCN-NEXT:    v_cmp_neq_f32_e64 s[6:7], s8, 1.0
+; GCN-NEXT:    s_and_b64 vcc, exec, s[6:7]
+; GCN-NEXT:    s_cbranch_vccnz .LBB17_2
+; GCN-NEXT:  ; %bb.1: ; %if
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_add_i32_e32 v15, vcc, v15, v15
+; GCN-NEXT:    v_add_i32_e32 v14, vcc, v14, v14
+; GCN-NEXT:    v_add_i32_e32 v13, vcc, v13, v13
+; GCN-NEXT:    v_add_i32_e32 v12, vcc, v12, v12
+; GCN-NEXT:    v_add_i32_e32 v11, vcc, v11, v11
+; GCN-NEXT:    v_add_i32_e32 v10, vcc, v10, v10
+; GCN-NEXT:    v_add_i32_e32 v9, vcc, v9, v9
+; GCN-NEXT:    v_add_i32_e32 v8, vcc, v8, v8
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, v7, v7
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, v6, v6
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v5
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v4
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v3
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v2
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v0
+; GCN-NEXT:  .LBB17_2: ; %endif
+; GCN-NEXT:    s_mov_b32 s6, s2
+; GCN-NEXT:    s_mov_b32 s7, s3
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    buffer_store_dwordx4 v[12:15], off, s[4:7], 0 offset:48
+; GCN-NEXT:    buffer_store_dwordx4 v[8:11], off, s[4:7], 0 offset:32
+; GCN-NEXT:    buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
+; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; GCN-NEXT:    s_endpgm
 entry:
   %v = load <16 x i32>, ptr addrspace(1) %in
   %cc = fcmp oeq float %cnd, 1.000000e+00
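
Note on the triangle tests above: they all share one CFG shape, a conditional
branch over a single speculatable block, joined by a phi. A minimal sketch of
the pattern (hypothetical function name, not from this patch) that AMDGPU's
early if-conversion lowers to a v_cndmask_b32 select when the cost model,
which consults these trace depths, deems it profitable:

  define float @triangle_sketch(float %v, float %cnd) {
  entry:
    %cc = fcmp oeq float %cnd, 1.000000e+00
    br i1 %cc, label %if, label %endif
  if:
    ; speculatable single-instruction block: a candidate for if-conversion
    %add = fadd float %v, %v
    br label %endif
  endif:
    ; the phi becomes a select (v_cndmask_b32) once the branch is removed
    %r = phi float [ %v, %entry ], [ %add, %if ]
    ret float %r
  }
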
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index c3ac778f82e0499..0f61ad54f736b07 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -728,328 +728,167 @@ bb:
 }
 
 define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
-; GFX1032-LABEL: test_udiv64:
-; GFX1032:       ; %bb.0: ; %bb
-; GFX1032-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX1032-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
-; GFX1032-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT:    s_or_b64 s[8:9], s[6:7], s[4:5]
-; GFX1032-NEXT:    s_mov_b32 s8, 0
-; GFX1032-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; GFX1032-NEXT:    s_cbranch_scc0 .LBB15_4
-; GFX1032-NEXT:  ; %bb.1:
-; GFX1032-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX1032-NEXT:    v_cvt_f32_u32_e32 v1, s5
-; GFX1032-NEXT:    s_sub_u32 s9, 0, s4
-; GFX1032-NEXT:    s_subb_u32 s10, 0, s5
-; GFX1032-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
-; GFX1032-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX1032-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
-; GFX1032-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
-; GFX1032-NEXT:    v_trunc_f32_e32 v1, v1
-; GFX1032-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GFX1032-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX1032-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1032-NEXT:    v_readfirstlane_b32 s0, v1
-; GFX1032-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1032-NEXT:    s_mul_i32 s11, s9, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s13, s9, s1
-; GFX1032-NEXT:    s_mul_i32 s12, s10, s1
-; GFX1032-NEXT:    s_add_i32 s11, s13, s11
-; GFX1032-NEXT:    s_mul_i32 s14, s9, s1
-; GFX1032-NEXT:    s_add_i32 s11, s11, s12
-; GFX1032-NEXT:    s_mul_hi_u32 s13, s1, s14
-; GFX1032-NEXT:    s_mul_hi_u32 s15, s0, s14
-; GFX1032-NEXT:    s_mul_i32 s12, s0, s14
-; GFX1032-NEXT:    s_mul_hi_u32 s14, s1, s11
-; GFX1032-NEXT:    s_mul_i32 s1, s1, s11
-; GFX1032-NEXT:    s_mul_hi_u32 s16, s0, s11
-; GFX1032-NEXT:    s_add_u32 s1, s13, s1
-; GFX1032-NEXT:    s_addc_u32 s13, 0, s14
-; GFX1032-NEXT:    s_add_u32 s1, s1, s12
-; GFX1032-NEXT:    s_mul_i32 s11, s0, s11
-; GFX1032-NEXT:    s_addc_u32 s1, s13, s15
-; GFX1032-NEXT:    s_addc_u32 s12, s16, 0
-; GFX1032-NEXT:    s_add_u32 s1, s1, s11
-; GFX1032-NEXT:    s_addc_u32 s11, 0, s12
-; GFX1032-NEXT:    v_add_co_u32 v0, s1, v0, s1
-; GFX1032-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX1032-NEXT:    s_addc_u32 s0, s0, s11
-; GFX1032-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1032-NEXT:    s_mul_i32 s11, s9, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s12, s9, s1
-; GFX1032-NEXT:    s_mul_i32 s10, s10, s1
-; GFX1032-NEXT:    s_add_i32 s11, s12, s11
-; GFX1032-NEXT:    s_mul_i32 s9, s9, s1
-; GFX1032-NEXT:    s_add_i32 s11, s11, s10
-; GFX1032-NEXT:    s_mul_hi_u32 s12, s0, s9
-; GFX1032-NEXT:    s_mul_i32 s13, s0, s9
-; GFX1032-NEXT:    s_mul_hi_u32 s9, s1, s9
-; GFX1032-NEXT:    s_mul_hi_u32 s14, s1, s11
-; GFX1032-NEXT:    s_mul_i32 s1, s1, s11
-; GFX1032-NEXT:    s_mul_hi_u32 s10, s0, s11
-; GFX1032-NEXT:    s_add_u32 s1, s9, s1
-; GFX1032-NEXT:    s_addc_u32 s9, 0, s14
-; GFX1032-NEXT:    s_add_u32 s1, s1, s13
-; GFX1032-NEXT:    s_mul_i32 s11, s0, s11
-; GFX1032-NEXT:    s_addc_u32 s1, s9, s12
-; GFX1032-NEXT:    s_addc_u32 s9, s10, 0
-; GFX1032-NEXT:    s_add_u32 s1, s1, s11
-; GFX1032-NEXT:    s_addc_u32 s9, 0, s9
-; GFX1032-NEXT:    v_add_co_u32 v0, s1, v0, s1
-; GFX1032-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX1032-NEXT:    s_addc_u32 s0, s0, s9
-; GFX1032-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1032-NEXT:    s_mul_i32 s10, s6, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s9, s6, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s11, s7, s0
-; GFX1032-NEXT:    s_mul_i32 s0, s7, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s12, s6, s1
-; GFX1032-NEXT:    s_mul_hi_u32 s13, s7, s1
-; GFX1032-NEXT:    s_mul_i32 s1, s7, s1
-; GFX1032-NEXT:    s_add_u32 s10, s12, s10
-; GFX1032-NEXT:    s_addc_u32 s9, 0, s9
-; GFX1032-NEXT:    s_add_u32 s1, s10, s1
-; GFX1032-NEXT:    s_addc_u32 s1, s9, s13
-; GFX1032-NEXT:    s_addc_u32 s9, s11, 0
-; GFX1032-NEXT:    s_add_u32 s1, s1, s0
-; GFX1032-NEXT:    s_addc_u32 s9, 0, s9
-; GFX1032-NEXT:    s_mul_hi_u32 s0, s4, s1
-; GFX1032-NEXT:    s_mul_i32 s11, s4, s9
-; GFX1032-NEXT:    s_mul_i32 s12, s4, s1
-; GFX1032-NEXT:    s_add_i32 s0, s0, s11
-; GFX1032-NEXT:    v_sub_co_u32 v0, s11, s6, s12
-; GFX1032-NEXT:    s_mul_i32 s10, s5, s1
-; GFX1032-NEXT:    s_add_i32 s0, s0, s10
-; GFX1032-NEXT:    v_sub_co_u32 v1, s12, v0, s4
-; GFX1032-NEXT:    s_sub_i32 s10, s7, s0
-; GFX1032-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX1032-NEXT:    s_subb_u32 s10, s10, s5
-; GFX1032-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX1032-NEXT:    v_cmp_le_u32_e32 vcc_lo, s4, v1
-; GFX1032-NEXT:    s_subb_u32 s10, s10, 0
-; GFX1032-NEXT:    s_cmp_ge_u32 s10, s5
-; GFX1032-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
-; GFX1032-NEXT:    s_cselect_b32 s12, -1, 0
-; GFX1032-NEXT:    s_cmp_eq_u32 s10, s5
-; GFX1032-NEXT:    s_cselect_b32 vcc_lo, -1, 0
-; GFX1032-NEXT:    s_add_u32 s10, s1, 1
-; GFX1032-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1032-NEXT:    s_addc_u32 s12, s9, 0
-; GFX1032-NEXT:    s_add_u32 s13, s1, 2
-; GFX1032-NEXT:    s_addc_u32 s14, s9, 0
-; GFX1032-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX1032-NEXT:    v_cmp_le_u32_e32 vcc_lo, s4, v0
-; GFX1032-NEXT:    s_subb_u32 s0, s7, s0
-; GFX1032-NEXT:    v_mov_b32_e32 v2, s13
-; GFX1032-NEXT:    s_cmp_ge_u32 s0, s5
-; GFX1032-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
-; GFX1032-NEXT:    s_cselect_b32 s7, -1, 0
-; GFX1032-NEXT:    s_cmp_eq_u32 s0, s5
-; GFX1032-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
-; GFX1032-NEXT:    s_cselect_b32 s0, -1, 0
-; GFX1032-NEXT:    v_mov_b32_e32 v1, s14
-; GFX1032-NEXT:    v_cndmask_b32_e64 v0, s7, v0, s0
-; GFX1032-NEXT:    v_cndmask_b32_e32 v2, s10, v2, vcc_lo
-; GFX1032-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1032-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT:    v_cndmask_b32_e32 v1, s9, v1, vcc_lo
-; GFX1032-NEXT:    v_cndmask_b32_e32 v0, s1, v2, vcc_lo
-; GFX1032-NEXT:    s_andn2_b32 vcc_lo, exec_lo, s8
-; GFX1032-NEXT:    s_cbranch_vccnz .LBB15_3
-; GFX1032-NEXT:  .LBB15_2:
-; GFX1032-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX1032-NEXT:    s_sub_i32 s1, 0, s4
-; GFX1032-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GFX1032-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
-; GFX1032-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1032-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1032-NEXT:    s_mul_i32 s1, s1, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s1, s0, s1
-; GFX1032-NEXT:    s_add_i32 s0, s0, s1
-; GFX1032-NEXT:    s_mul_hi_u32 s0, s6, s0
-; GFX1032-NEXT:    s_mul_i32 s1, s0, s4
-; GFX1032-NEXT:    s_add_i32 s5, s0, 1
-; GFX1032-NEXT:    s_sub_i32 s1, s6, s1
-; GFX1032-NEXT:    s_sub_i32 s6, s1, s4
-; GFX1032-NEXT:    s_cmp_ge_u32 s1, s4
-; GFX1032-NEXT:    s_cselect_b32 s0, s5, s0
-; GFX1032-NEXT:    s_cselect_b32 s1, s6, s1
-; GFX1032-NEXT:    s_add_i32 s5, s0, 1
-; GFX1032-NEXT:    s_cmp_ge_u32 s1, s4
-; GFX1032-NEXT:    s_mov_b32 s1, 0
-; GFX1032-NEXT:    s_cselect_b32 s0, s5, s0
-; GFX1032-NEXT:    v_mov_b32_e32 v0, s0
-; GFX1032-NEXT:    v_mov_b32_e32 v1, s1
-; GFX1032-NEXT:  .LBB15_3:
-; GFX1032-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1032-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3] offset:16
-; GFX1032-NEXT:    s_endpgm
-; GFX1032-NEXT:  .LBB15_4:
-; GFX1032-NEXT:    ; implicit-def: $vgpr0_vgpr1
-; GFX1032-NEXT:    s_branch .LBB15_2
-;
-; GFX1064-LABEL: test_udiv64:
-; GFX1064:       ; %bb.0: ; %bb
-; GFX1064-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX1064-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
-; GFX1064-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT:    s_or_b64 s[0:1], s[6:7], s[4:5]
-; GFX1064-NEXT:    s_mov_b32 s0, 0
-; GFX1064-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT:    s_cbranch_scc0 .LBB15_4
-; GFX1064-NEXT:  ; %bb.1:
-; GFX1064-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX1064-NEXT:    v_cvt_f32_u32_e32 v1, s5
-; GFX1064-NEXT:    s_sub_u32 s9, 0, s4
-; GFX1064-NEXT:    s_subb_u32 s10, 0, s5
-; GFX1064-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
-; GFX1064-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX1064-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
-; GFX1064-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
-; GFX1064-NEXT:    v_trunc_f32_e32 v1, v1
-; GFX1064-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GFX1064-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX1064-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1064-NEXT:    v_readfirstlane_b32 s8, v1
-; GFX1064-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1064-NEXT:    s_mul_i32 s1, s9, s8
-; GFX1064-NEXT:    s_mul_hi_u32 s12, s9, s0
-; GFX1064-NEXT:    s_mul_i32 s11, s10, s0
-; GFX1064-NEXT:    s_add_i32 s1, s12, s1
-; GFX1064-NEXT:    s_mul_i32 s13, s9, s0
-; GFX1064-NEXT:    s_add_i32 s1, s1, s11
-; GFX1064-NEXT:    s_mul_hi_u32 s12, s0, s13
-; GFX1064-NEXT:    s_mul_hi_u32 s14, s8, s13
-; GFX1064-NEXT:    s_mul_i32 s11, s8, s13
-; GFX1064-NEXT:    s_mul_hi_u32 s13, s0, s1
-; GFX1064-NEXT:    s_mul_i32 s0, s0, s1
-; GFX1064-NEXT:    s_mul_hi_u32 s15, s8, s1
-; GFX1064-NEXT:    s_add_u32 s0, s12, s0
-; GFX1064-NEXT:    s_addc_u32 s12, 0, s13
-; GFX1064-NEXT:    s_add_u32 s0, s0, s11
-; GFX1064-NEXT:    s_mul_i32 s1, s8, s1
-; GFX1064-NEXT:    s_addc_u32 s0, s12, s14
-; GFX1064-NEXT:    s_addc_u32 s11, s15, 0
-; GFX1064-NEXT:    s_add_u32 s0, s0, s1
-; GFX1064-NEXT:    s_addc_u32 s11, 0, s11
-; GFX1064-NEXT:    v_add_co_u32 v0, s[0:1], v0, s0
-; GFX1064-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT:    s_addc_u32 s8, s8, s11
-; GFX1064-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1064-NEXT:    s_mul_i32 s1, s9, s8
-; GFX1064-NEXT:    s_mul_hi_u32 s11, s9, s0
-; GFX1064-NEXT:    s_mul_i32 s10, s10, s0
-; GFX1064-NEXT:    s_add_i32 s1, s11, s1
-; GFX1064-NEXT:    s_mul_i32 s9, s9, s0
-; GFX1064-NEXT:    s_add_i32 s1, s1, s10
-; GFX1064-NEXT:    s_mul_hi_u32 s11, s8, s9
-; GFX1064-NEXT:    s_mul_i32 s12, s8, s9
-; GFX1064-NEXT:    s_mul_hi_u32 s9, s0, s9
-; GFX1064-NEXT:    s_mul_hi_u32 s13, s0, s1
-; GFX1064-NEXT:    s_mul_i32 s0, s0, s1
-; GFX1064-NEXT:    s_mul_hi_u32 s10, s8, s1
-; GFX1064-NEXT:    s_add_u32 s0, s9, s0
-; GFX1064-NEXT:    s_addc_u32 s9, 0, s13
-; GFX1064-NEXT:    s_add_u32 s0, s0, s12
-; GFX1064-NEXT:    s_mul_i32 s1, s8, s1
-; GFX1064-NEXT:    s_addc_u32 s0, s9, s11
-; GFX1064-NEXT:    s_addc_u32 s9, s10, 0
-; GFX1064-NEXT:    s_add_u32 s0, s0, s1
-; GFX1064-NEXT:    s_addc_u32 s9, 0, s9
-; GFX1064-NEXT:    v_add_co_u32 v0, s[0:1], v0, s0
-; GFX1064-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT:    s_addc_u32 s0, s8, s9
-; GFX1064-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1064-NEXT:    s_mul_i32 s9, s6, s0
-; GFX1064-NEXT:    s_mul_hi_u32 s8, s6, s0
-; GFX1064-NEXT:    s_mul_hi_u32 s10, s7, s0
-; GFX1064-NEXT:    s_mul_i32 s0, s7, s0
-; GFX1064-NEXT:    s_mul_hi_u32 s11, s6, s1
-; GFX1064-NEXT:    s_mul_hi_u32 s12, s7, s1
-; GFX1064-NEXT:    s_mul_i32 s1, s7, s1
-; GFX1064-NEXT:    s_add_u32 s9, s11, s9
-; GFX1064-NEXT:    s_addc_u32 s8, 0, s8
-; GFX1064-NEXT:    s_add_u32 s1, s9, s1
-; GFX1064-NEXT:    s_addc_u32 s1, s8, s12
-; GFX1064-NEXT:    s_addc_u32 s8, s10, 0
-; GFX1064-NEXT:    s_add_u32 s10, s1, s0
-; GFX1064-NEXT:    s_addc_u32 s11, 0, s8
-; GFX1064-NEXT:    s_mul_hi_u32 s0, s4, s10
-; GFX1064-NEXT:    s_mul_i32 s1, s4, s11
-; GFX1064-NEXT:    s_mul_i32 s9, s4, s10
-; GFX1064-NEXT:    s_add_i32 s12, s0, s1
-; GFX1064-NEXT:    v_sub_co_u32 v0, s[0:1], s6, s9
-; GFX1064-NEXT:    s_mul_i32 s8, s5, s10
-; GFX1064-NEXT:    s_add_i32 s12, s12, s8
-; GFX1064-NEXT:    v_sub_co_u32 v1, s[8:9], v0, s4
-; GFX1064-NEXT:    s_sub_i32 s13, s7, s12
-; GFX1064-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT:    s_subb_u32 s13, s13, s5
-; GFX1064-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; GFX1064-NEXT:    v_cmp_le_u32_e32 vcc, s4, v1
-; GFX1064-NEXT:    s_subb_u32 s8, s13, 0
-; GFX1064-NEXT:    s_cmp_ge_u32 s8, s5
-; GFX1064-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; GFX1064-NEXT:    s_cselect_b32 s9, -1, 0
-; GFX1064-NEXT:    s_cmp_eq_u32 s8, s5
-; GFX1064-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX1064-NEXT:    s_add_u32 s8, s10, 1
-; GFX1064-NEXT:    v_cndmask_b32_e32 v1, s9, v1, vcc
-; GFX1064-NEXT:    s_addc_u32 s9, s11, 0
-; GFX1064-NEXT:    s_add_u32 s13, s10, 2
-; GFX1064-NEXT:    s_addc_u32 s14, s11, 0
-; GFX1064-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT:    v_cmp_le_u32_e32 vcc, s4, v0
-; GFX1064-NEXT:    s_subb_u32 s0, s7, s12
-; GFX1064-NEXT:    v_mov_b32_e32 v2, s13
-; GFX1064-NEXT:    s_cmp_ge_u32 s0, s5
-; GFX1064-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; GFX1064-NEXT:    s_cselect_b32 s7, -1, 0
-; GFX1064-NEXT:    s_cmp_eq_u32 s0, s5
-; GFX1064-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX1064-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; GFX1064-NEXT:    v_mov_b32_e32 v1, s14
-; GFX1064-NEXT:    v_cndmask_b32_e64 v0, s7, v0, s[0:1]
-; GFX1064-NEXT:    v_cndmask_b32_e32 v2, s8, v2, vcc
-; GFX1064-NEXT:    v_cndmask_b32_e32 v1, s9, v1, vcc
-; GFX1064-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX1064-NEXT:    v_cndmask_b32_e32 v1, s11, v1, vcc
-; GFX1064-NEXT:    v_cndmask_b32_e32 v0, s10, v2, vcc
-; GFX1064-NEXT:    s_cbranch_execnz .LBB15_3
-; GFX1064-NEXT:  .LBB15_2:
-; GFX1064-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX1064-NEXT:    s_sub_i32 s1, 0, s4
-; GFX1064-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GFX1064-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
-; GFX1064-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1064-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1064-NEXT:    s_mul_i32 s1, s1, s0
-; GFX1064-NEXT:    s_mul_hi_u32 s1, s0, s1
-; GFX1064-NEXT:    s_add_i32 s0, s0, s1
-; GFX1064-NEXT:    s_mul_hi_u32 s0, s6, s0
-; GFX1064-NEXT:    s_mul_i32 s1, s0, s4
-; GFX1064-NEXT:    s_add_i32 s5, s0, 1
-; GFX1064-NEXT:    s_sub_i32 s1, s6, s1
-; GFX1064-NEXT:    s_sub_i32 s6, s1, s4
-; GFX1064-NEXT:    s_cmp_ge_u32 s1, s4
-; GFX1064-NEXT:    s_cselect_b32 s0, s5, s0
-; GFX1064-NEXT:    s_cselect_b32 s1, s6, s1
-; GFX1064-NEXT:    s_add_i32 s5, s0, 1
-; GFX1064-NEXT:    s_cmp_ge_u32 s1, s4
-; GFX1064-NEXT:    s_mov_b32 s1, 0
-; GFX1064-NEXT:    s_cselect_b32 s0, s5, s0
-; GFX1064-NEXT:    v_mov_b32_e32 v0, s0
-; GFX1064-NEXT:    v_mov_b32_e32 v1, s1
-; GFX1064-NEXT:  .LBB15_3:
-; GFX1064-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1064-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3] offset:16
-; GFX1064-NEXT:    s_endpgm
-; GFX1064-NEXT:  .LBB15_4:
-; GFX1064-NEXT:    ; implicit-def: $vgpr0_vgpr1
-; GFX1064-NEXT:    s_branch .LBB15_2
+; GFX10DEFWAVE-LABEL: test_udiv64:
+; GFX10DEFWAVE:       ; %bb.0: ; %bb
+; GFX10DEFWAVE-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX10DEFWAVE-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DEFWAVE-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
+; GFX10DEFWAVE-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DEFWAVE-NEXT:    s_or_b64 s[8:9], s[6:7], s[4:5]
+; GFX10DEFWAVE-NEXT:    s_mov_b32 s8, 0
+; GFX10DEFWAVE-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX10DEFWAVE-NEXT:    s_cbranch_scc0 .LBB15_4
+; GFX10DEFWAVE-NEXT:  ; %bb.1:
+; GFX10DEFWAVE-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GFX10DEFWAVE-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GFX10DEFWAVE-NEXT:    s_sub_u32 s9, 0, s4
+; GFX10DEFWAVE-NEXT:    s_subb_u32 s10, 0, s5
+; GFX10DEFWAVE-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
+; GFX10DEFWAVE-NEXT:    v_rcp_f32_e32 v0, v0
+; GFX10DEFWAVE-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; GFX10DEFWAVE-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GFX10DEFWAVE-NEXT:    v_trunc_f32_e32 v1, v1
+; GFX10DEFWAVE-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
+; GFX10DEFWAVE-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GFX10DEFWAVE-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX10DEFWAVE-NEXT:    v_readfirstlane_b32 s0, v1
+; GFX10DEFWAVE-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s11, s9, s0
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s13, s9, s1
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s12, s10, s1
+; GFX10DEFWAVE-NEXT:    s_add_i32 s11, s13, s11
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s14, s9, s1
+; GFX10DEFWAVE-NEXT:    s_add_i32 s11, s11, s12
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s13, s1, s14
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s15, s0, s14
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s12, s0, s14
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s14, s1, s11
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s1, s1, s11
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s16, s0, s11
+; GFX10DEFWAVE-NEXT:    s_add_u32 s1, s13, s1
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s13, 0, s14
+; GFX10DEFWAVE-NEXT:    s_add_u32 s1, s1, s12
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s11, s0, s11
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s1, s13, s15
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s12, s16, 0
+; GFX10DEFWAVE-NEXT:    s_add_u32 s1, s1, s11
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s11, 0, s12
+; GFX10DEFWAVE-NEXT:    v_add_co_u32 v0, s1, v0, s1
+; GFX10DEFWAVE-NEXT:    s_cmp_lg_u32 s1, 0
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s0, s0, s11
+; GFX10DEFWAVE-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s11, s9, s0
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s12, s9, s1
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s10, s10, s1
+; GFX10DEFWAVE-NEXT:    s_add_i32 s11, s12, s11
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s9, s9, s1
+; GFX10DEFWAVE-NEXT:    s_add_i32 s11, s11, s10
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s12, s0, s9
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s13, s0, s9
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s9, s1, s9
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s14, s1, s11
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s1, s1, s11
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s10, s0, s11
+; GFX10DEFWAVE-NEXT:    s_add_u32 s1, s9, s1
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s9, 0, s14
+; GFX10DEFWAVE-NEXT:    s_add_u32 s1, s1, s13
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s11, s0, s11
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s1, s9, s12
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s9, s10, 0
+; GFX10DEFWAVE-NEXT:    s_add_u32 s1, s1, s11
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s9, 0, s9
+; GFX10DEFWAVE-NEXT:    v_add_co_u32 v0, s1, v0, s1
+; GFX10DEFWAVE-NEXT:    s_cmp_lg_u32 s1, 0
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s0, s0, s9
+; GFX10DEFWAVE-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s10, s6, s0
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s9, s6, s0
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s11, s7, s0
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s0, s7, s0
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s12, s6, s1
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s13, s7, s1
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s1, s7, s1
+; GFX10DEFWAVE-NEXT:    s_add_u32 s10, s12, s10
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s9, 0, s9
+; GFX10DEFWAVE-NEXT:    s_add_u32 s1, s10, s1
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s1, s9, s13
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s9, s11, 0
+; GFX10DEFWAVE-NEXT:    s_add_u32 s1, s1, s0
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s9, 0, s9
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s0, s4, s1
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s11, s4, s9
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s12, s4, s1
+; GFX10DEFWAVE-NEXT:    s_add_i32 s0, s0, s11
+; GFX10DEFWAVE-NEXT:    v_sub_co_u32 v0, s11, s6, s12
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s10, s5, s1
+; GFX10DEFWAVE-NEXT:    s_add_i32 s0, s0, s10
+; GFX10DEFWAVE-NEXT:    v_sub_co_u32 v1, s12, v0, s4
+; GFX10DEFWAVE-NEXT:    s_sub_i32 s10, s7, s0
+; GFX10DEFWAVE-NEXT:    s_cmp_lg_u32 s11, 0
+; GFX10DEFWAVE-NEXT:    s_subb_u32 s10, s10, s5
+; GFX10DEFWAVE-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX10DEFWAVE-NEXT:    v_cmp_le_u32_e32 vcc_lo, s4, v1
+; GFX10DEFWAVE-NEXT:    s_subb_u32 s10, s10, 0
+; GFX10DEFWAVE-NEXT:    s_cmp_ge_u32 s10, s5
+; GFX10DEFWAVE-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX10DEFWAVE-NEXT:    s_cselect_b32 s12, -1, 0
+; GFX10DEFWAVE-NEXT:    s_cmp_eq_u32 s10, s5
+; GFX10DEFWAVE-NEXT:    s_cselect_b32 vcc_lo, -1, 0
+; GFX10DEFWAVE-NEXT:    s_add_u32 s10, s1, 1
+; GFX10DEFWAVE-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s12, s9, 0
+; GFX10DEFWAVE-NEXT:    s_add_u32 s13, s1, 2
+; GFX10DEFWAVE-NEXT:    s_addc_u32 s14, s9, 0
+; GFX10DEFWAVE-NEXT:    s_cmp_lg_u32 s11, 0
+; GFX10DEFWAVE-NEXT:    v_cmp_le_u32_e32 vcc_lo, s4, v0
+; GFX10DEFWAVE-NEXT:    s_subb_u32 s0, s7, s0
+; GFX10DEFWAVE-NEXT:    v_mov_b32_e32 v2, s13
+; GFX10DEFWAVE-NEXT:    s_cmp_ge_u32 s0, s5
+; GFX10DEFWAVE-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
+; GFX10DEFWAVE-NEXT:    s_cselect_b32 s7, -1, 0
+; GFX10DEFWAVE-NEXT:    s_cmp_eq_u32 s0, s5
+; GFX10DEFWAVE-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
+; GFX10DEFWAVE-NEXT:    s_cselect_b32 s0, -1, 0
+; GFX10DEFWAVE-NEXT:    v_mov_b32_e32 v1, s14
+; GFX10DEFWAVE-NEXT:    v_cndmask_b32_e64 v0, s7, v0, s0
+; GFX10DEFWAVE-NEXT:    v_cndmask_b32_e32 v2, s10, v2, vcc_lo
+; GFX10DEFWAVE-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
+; GFX10DEFWAVE-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10DEFWAVE-NEXT:    v_cndmask_b32_e32 v1, s9, v1, vcc_lo
+; GFX10DEFWAVE-NEXT:    v_cndmask_b32_e32 v0, s1, v2, vcc_lo
+; GFX10DEFWAVE-NEXT:    s_andn2_b32 vcc_lo, exec_lo, s8
+; GFX10DEFWAVE-NEXT:    s_cbranch_vccnz .LBB15_3
+; GFX10DEFWAVE-NEXT:  .LBB15_2:
+; GFX10DEFWAVE-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GFX10DEFWAVE-NEXT:    s_sub_i32 s1, 0, s4
+; GFX10DEFWAVE-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GFX10DEFWAVE-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GFX10DEFWAVE-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX10DEFWAVE-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s1, s1, s0
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s1, s0, s1
+; GFX10DEFWAVE-NEXT:    s_add_i32 s0, s0, s1
+; GFX10DEFWAVE-NEXT:    s_mul_hi_u32 s0, s6, s0
+; GFX10DEFWAVE-NEXT:    s_mul_i32 s1, s0, s4
+; GFX10DEFWAVE-NEXT:    s_add_i32 s5, s0, 1
+; GFX10DEFWAVE-NEXT:    s_sub_i32 s1, s6, s1
+; GFX10DEFWAVE-NEXT:    s_sub_i32 s6, s1, s4
+; GFX10DEFWAVE-NEXT:    s_cmp_ge_u32 s1, s4
+; GFX10DEFWAVE-NEXT:    s_cselect_b32 s0, s5, s0
+; GFX10DEFWAVE-NEXT:    s_cselect_b32 s1, s6, s1
+; GFX10DEFWAVE-NEXT:    s_add_i32 s5, s0, 1
+; GFX10DEFWAVE-NEXT:    s_cmp_ge_u32 s1, s4
+; GFX10DEFWAVE-NEXT:    s_mov_b32 s1, 0
+; GFX10DEFWAVE-NEXT:    s_cselect_b32 s0, s5, s0
+; GFX10DEFWAVE-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10DEFWAVE-NEXT:    v_mov_b32_e32 v1, s1
+; GFX10DEFWAVE-NEXT:  .LBB15_3:
+; GFX10DEFWAVE-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10DEFWAVE-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3] offset:16
+; GFX10DEFWAVE-NEXT:    s_endpgm
+; GFX10DEFWAVE-NEXT:  .LBB15_4:
+; GFX10DEFWAVE-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GFX10DEFWAVE-NEXT:    s_branch .LBB15_2
 bb:
   %tmp = getelementptr inbounds i64, ptr addrspace(1) %arg, i64 1
   %tmp1 = load i64, ptr addrspace(1) %tmp, align 8
@@ -2352,45 +2191,25 @@ define amdgpu_ps float @test_ps_live() #0 {
 }
 
 define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
-; GFX1032-LABEL: test_vccnz_ifcvt_triangle64:
-; GFX1032:       ; %bb.0: ; %entry
-; GFX1032-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX1032-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x0
-; GFX1032-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT:    v_cmp_neq_f64_e64 s4, s[2:3], 1.0
-; GFX1032-NEXT:    s_and_b32 vcc_lo, exec_lo, s4
-; GFX1032-NEXT:    s_cbranch_vccnz .LBB47_2
-; GFX1032-NEXT:  ; %bb.1: ; %if
-; GFX1032-NEXT:    v_add_f64 v[0:1], s[2:3], s[2:3]
-; GFX1032-NEXT:    s_branch .LBB47_3
-; GFX1032-NEXT:  .LBB47_2:
-; GFX1032-NEXT:    v_mov_b32_e32 v0, s2
-; GFX1032-NEXT:    v_mov_b32_e32 v1, s3
-; GFX1032-NEXT:  .LBB47_3: ; %endif
-; GFX1032-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1032-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
-; GFX1032-NEXT:    s_endpgm
-;
-; GFX1064-LABEL: test_vccnz_ifcvt_triangle64:
-; GFX1064:       ; %bb.0: ; %entry
-; GFX1064-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX1064-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x0
-; GFX1064-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT:    v_cmp_neq_f64_e64 s[4:5], s[2:3], 1.0
-; GFX1064-NEXT:    s_and_b64 vcc, exec, s[4:5]
-; GFX1064-NEXT:    s_cbranch_vccnz .LBB47_2
-; GFX1064-NEXT:  ; %bb.1: ; %if
-; GFX1064-NEXT:    v_add_f64 v[0:1], s[2:3], s[2:3]
-; GFX1064-NEXT:    s_branch .LBB47_3
-; GFX1064-NEXT:  .LBB47_2:
-; GFX1064-NEXT:    v_mov_b32_e32 v0, s2
-; GFX1064-NEXT:    v_mov_b32_e32 v1, s3
-; GFX1064-NEXT:  .LBB47_3: ; %endif
-; GFX1064-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1064-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
-; GFX1064-NEXT:    s_endpgm
+; GFX10DEFWAVE-LABEL: test_vccnz_ifcvt_triangle64:
+; GFX10DEFWAVE:       ; %bb.0: ; %entry
+; GFX10DEFWAVE-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX10DEFWAVE-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DEFWAVE-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x0
+; GFX10DEFWAVE-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10DEFWAVE-NEXT:    v_cmp_neq_f64_e64 s4, s[2:3], 1.0
+; GFX10DEFWAVE-NEXT:    s_and_b32 vcc_lo, exec_lo, s4
+; GFX10DEFWAVE-NEXT:    s_cbranch_vccnz .LBB47_2
+; GFX10DEFWAVE-NEXT:  ; %bb.1: ; %if
+; GFX10DEFWAVE-NEXT:    v_add_f64 v[0:1], s[2:3], s[2:3]
+; GFX10DEFWAVE-NEXT:    s_branch .LBB47_3
+; GFX10DEFWAVE-NEXT:  .LBB47_2:
+; GFX10DEFWAVE-NEXT:    v_mov_b32_e32 v0, s2
+; GFX10DEFWAVE-NEXT:    v_mov_b32_e32 v1, s3
+; GFX10DEFWAVE-NEXT:  .LBB47_3: ; %endif
+; GFX10DEFWAVE-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10DEFWAVE-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10DEFWAVE-NEXT:    s_endpgm
 entry:
   %v = load double, ptr addrspace(1) %in
   %cc = fcmp oeq double %v, 1.000000e+00
@@ -2955,5 +2774,3 @@ attributes #2 = { nounwind readnone optnone noinline }
 attributes #3 = { "target-features"="+wavefrontsize32" }
 attributes #4 = { "target-features"="+wavefrontsize64" }
 attributes #5 = { inaccessiblememonly nounwind }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GFX10DEFWAVE: {{.*}}
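
Note: the two lines deleted just above are the placeholder that
utils/update_llc_test_checks.py appends for a RUN-line prefix that matched no
checks; with this change GFX10DEFWAVE carries real check lines, so the
placeholder is no longer emitted. As a generic illustration of shared check
prefixes (hypothetical RUN lines, not wave32.ll's actual ones):

  ; RUN: llc ... | FileCheck -check-prefixes=SPECIFIC1,COMMON %s
  ; RUN: llc ... | FileCheck -check-prefixes=SPECIFIC2,COMMON %s

checks that both configurations share are emitted once under COMMON, which is
presumably what happens with GFX10DEFWAVE for the two functions above.
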
diff --git a/llvm/test/CodeGen/PowerPC/machine-combiner.ll b/llvm/test/CodeGen/PowerPC/machine-combiner.ll
index ea6778a4df76ee8..baaecafd00e0c77 100644
--- a/llvm/test/CodeGen/PowerPC/machine-combiner.ll
+++ b/llvm/test/CodeGen/PowerPC/machine-combiner.ll
@@ -1,4 +1,5 @@
-; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr7 < %s | FileCheck  %s -check-prefix=CHECK -check-prefix=CHECK-PWR
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr7 < %s | FileCheck  %s -check-prefix=CHECK
 ; RUN: llc -verify-machineinstrs -O3 -mcpu=pwr9 < %s | FileCheck  %s -check-prefix=FIXPOINT
 target datalayout = "E-m:e-i64:64-n32:64"
 target triple = "powerpc64-unknown-linux-gnu"
@@ -9,10 +10,17 @@ target triple = "powerpc64-unknown-linux-gnu"
 define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
 ; CHECK-LABEL: reassociate_adds1:
 ; CHECK:       # %bb.0:
-; CHECK:       fadds [[REG0:[0-9]+]], 1, 2
-; CHECK:       fadds [[REG1:[0-9]+]], 3, 4
-; CHECK:       fadds 1, [[REG0]], [[REG1]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    fadds 0, 2, 3
+; CHECK-NEXT:    fadds 1, 1, 4
+; CHECK-NEXT:    fadds 1, 1, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_adds1:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsaddsp 0, 1, 2
+; FIXPOINT-NEXT:    xsaddsp 1, 3, 4
+; FIXPOINT-NEXT:    xsaddsp 1, 0, 1
+; FIXPOINT-NEXT:    blr
 
   %t0 = fadd reassoc nsz float %x0, %x1
   %t1 = fadd reassoc nsz float %t0, %x2
@@ -23,10 +31,17 @@ define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
 define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
 ; CHECK-LABEL: reassociate_adds2:
 ; CHECK:       # %bb.0:
-; CHECK:       fadds [[REG0:[0-9]+]], 1, 2
-; CHECK:       fadds [[REG1:[0-9]+]], 3, 4
-; CHECK:       fadds 1, [[REG1]], [[REG0]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    fadds 0, 3, 2
+; CHECK-NEXT:    fadds 1, 1, 4
+; CHECK-NEXT:    fadds 1, 0, 1
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_adds2:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsaddsp 0, 1, 2
+; FIXPOINT-NEXT:    xsaddsp 1, 3, 4
+; FIXPOINT-NEXT:    xsaddsp 1, 1, 0
+; FIXPOINT-NEXT:    blr
 
   %t0 = fadd reassoc nsz float %x0, %x1
   %t1 = fadd reassoc nsz float %x2, %t0
@@ -37,10 +52,17 @@ define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
 define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
 ; CHECK-LABEL: reassociate_adds3:
 ; CHECK:       # %bb.0:
-; CHECK:       fadds [[REG0:[0-9]+]], 1, 2
-; CHECK:       fadds [[REG1:[0-9]+]], 4, 3
-; CHECK:       fadds 1, [[REG1]], [[REG0]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    fadds 0, 2, 3
+; CHECK-NEXT:    fadds 1, 4, 1
+; CHECK-NEXT:    fadds 1, 1, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_adds3:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsaddsp 0, 1, 2
+; FIXPOINT-NEXT:    xsaddsp 1, 4, 3
+; FIXPOINT-NEXT:    xsaddsp 1, 1, 0
+; FIXPOINT-NEXT:    blr
 
   %t0 = fadd reassoc nsz float %x0, %x1
   %t1 = fadd reassoc nsz float %t0, %x2
@@ -51,10 +73,17 @@ define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
 define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
 ; CHECK-LABEL: reassociate_adds4:
 ; CHECK:       # %bb.0:
-; CHECK:       fadds [[REG0:[0-9]+]], 1, 2
-; CHECK:       fadds [[REG1:[0-9]+]], 4, 3
-; CHECK:       fadds 1, [[REG1]], [[REG0]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    fadds 0, 3, 2
+; CHECK-NEXT:    fadds 1, 4, 1
+; CHECK-NEXT:    fadds 1, 1, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_adds4:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsaddsp 0, 1, 2
+; FIXPOINT-NEXT:    xsaddsp 1, 4, 3
+; FIXPOINT-NEXT:    xsaddsp 1, 1, 0
+; FIXPOINT-NEXT:    blr
 
   %t0 = fadd reassoc nsz float %x0, %x1
   %t1 = fadd reassoc nsz float %x2, %t0
@@ -68,14 +97,25 @@ define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
 define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) {
 ; CHECK-LABEL: reassociate_adds5:
 ; CHECK:       # %bb.0:
-; CHECK-DAG:   fadds [[REG12:[0-9]+]], 5, 6
-; CHECK-DAG:   fadds [[REG0:[0-9]+]], 1, 2
-; CHECK-DAG:   fadds [[REG11:[0-9]+]], 3, 4
-; CHECK-DAG:   fadds [[REG13:[0-9]+]], [[REG12]], 7
-; CHECK-DAG:   fadds [[REG1:[0-9]+]], [[REG0]], [[REG11]]
-; CHECK-DAG:   fadds [[REG2:[0-9]+]], [[REG1]], [[REG13]]
-; CHECK:       fadds 1, [[REG2]], 8
+; CHECK-NEXT:    fadds 0, 2, 3
+; CHECK-NEXT:    fadds 1, 1, 4
+; CHECK-NEXT:    fadds 0, 1, 0
+; CHECK-NEXT:    fadds 1, 5, 6
+; CHECK-NEXT:    fadds 1, 1, 7
+; CHECK-NEXT:    fadds 0, 0, 1
+; CHECK-NEXT:    fadds 1, 0, 8
 ; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_adds5:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsaddsp 0, 1, 2
+; FIXPOINT-NEXT:    xsaddsp 1, 3, 4
+; FIXPOINT-NEXT:    xsaddsp 0, 0, 1
+; FIXPOINT-NEXT:    xsaddsp 1, 5, 6
+; FIXPOINT-NEXT:    xsaddsp 1, 1, 7
+; FIXPOINT-NEXT:    xsaddsp 0, 0, 1
+; FIXPOINT-NEXT:    xsaddsp 1, 0, 8
+; FIXPOINT-NEXT:    blr
 
   %t0 = fadd reassoc nsz float %x0, %x1
   %t1 = fadd reassoc nsz float %t0, %x2
@@ -92,10 +132,17 @@ define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, floa
 define <4 x float> @vector_reassociate_adds1(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; CHECK-LABEL: vector_reassociate_adds1:
 ; CHECK:       # %bb.0:
-; CHECK-PWR:       xvaddsp [[REG0:[0-9]+]], 34, 35
-; CHECK-PWR:       xvaddsp [[REG1:[0-9]+]], 36, 37
-; CHECK-PWR:       xvaddsp 34, [[REG0]], [[REG1]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    xvaddsp 0, 35, 36
+; CHECK-NEXT:    xvaddsp 1, 34, 37
+; CHECK-NEXT:    xvaddsp 34, 1, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: vector_reassociate_adds1:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xvaddsp 0, 34, 35
+; FIXPOINT-NEXT:    xvaddsp 1, 36, 37
+; FIXPOINT-NEXT:    xvaddsp 34, 0, 1
+; FIXPOINT-NEXT:    blr
 
   %t0 = fadd reassoc nsz <4 x float> %x0, %x1
   %t1 = fadd reassoc nsz <4 x float> %t0, %x2
@@ -106,10 +153,17 @@ define <4 x float> @vector_reassociate_adds1(<4 x float> %x0, <4 x float> %x1, <
 define <4 x float> @vector_reassociate_adds2(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; CHECK-LABEL: vector_reassociate_adds2:
 ; CHECK:       # %bb.0:
-; CHECK-PWR:       xvaddsp [[REG0:[0-9]+]], 34, 35
-; CHECK-PWR:       xvaddsp [[REG1:[0-9]+]], 36, 37
-; CHECK-PWR:       xvaddsp 34, [[REG1]], [[REG0]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    xvaddsp 0, 36, 35
+; CHECK-NEXT:    xvaddsp 1, 34, 37
+; CHECK-NEXT:    xvaddsp 34, 0, 1
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: vector_reassociate_adds2:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xvaddsp 0, 34, 35
+; FIXPOINT-NEXT:    xvaddsp 1, 36, 37
+; FIXPOINT-NEXT:    xvaddsp 34, 1, 0
+; FIXPOINT-NEXT:    blr
 
   %t0 = fadd reassoc nsz <4 x float> %x0, %x1
   %t1 = fadd reassoc nsz <4 x float> %x2, %t0
@@ -120,10 +174,17 @@ define <4 x float> @vector_reassociate_adds2(<4 x float> %x0, <4 x float> %x1, <
 define <4 x float> @vector_reassociate_adds3(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; CHECK-LABEL: vector_reassociate_adds3:
 ; CHECK:       # %bb.0:
-; CHECK-PWR:       xvaddsp [[REG0:[0-9]+]], 34, 35
-; CHECK-PWR:       xvaddsp [[REG1:[0-9]+]], 37, 36
-; CHECK-PWR:       xvaddsp 34, [[REG1]], [[REG0]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    xvaddsp 0, 35, 36
+; CHECK-NEXT:    xvaddsp 1, 37, 34
+; CHECK-NEXT:    xvaddsp 34, 1, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: vector_reassociate_adds3:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xvaddsp 0, 34, 35
+; FIXPOINT-NEXT:    xvaddsp 1, 37, 36
+; FIXPOINT-NEXT:    xvaddsp 34, 1, 0
+; FIXPOINT-NEXT:    blr
 
   %t0 = fadd reassoc nsz <4 x float> %x0, %x1
   %t1 = fadd reassoc nsz <4 x float> %t0, %x2
@@ -134,10 +195,17 @@ define <4 x float> @vector_reassociate_adds3(<4 x float> %x0, <4 x float> %x1, <
 define <4 x float> @vector_reassociate_adds4(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; CHECK-LABEL: vector_reassociate_adds4:
 ; CHECK:       # %bb.0:
-; CHECK-PWR:       xvaddsp [[REG0:[0-9]+]], 34, 35
-; CHECK-PWR:       xvaddsp [[REG1:[0-9]+]], 37, 36
-; CHECK-PWR:       xvaddsp 34, [[REG1]], [[REG0]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    xvaddsp 0, 36, 35
+; CHECK-NEXT:    xvaddsp 1, 37, 34
+; CHECK-NEXT:    xvaddsp 34, 1, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: vector_reassociate_adds4:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xvaddsp 0, 34, 35
+; FIXPOINT-NEXT:    xvaddsp 1, 37, 36
+; FIXPOINT-NEXT:    xvaddsp 34, 1, 0
+; FIXPOINT-NEXT:    blr
 
   %t0 = fadd reassoc nsz <4 x float> %x0, %x1
   %t1 = fadd reassoc nsz <4 x float> %x2, %t0
@@ -146,6 +214,19 @@ define <4 x float> @vector_reassociate_adds4(<4 x float> %x0, <4 x float> %x1, <
 }
 
 define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_adds6:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fdivs 0, 1, 2
+; CHECK-NEXT:    fadds 0, 3, 0
+; CHECK-NEXT:    fadds 1, 4, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_adds6:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsdivsp 0, 1, 2
+; FIXPOINT-NEXT:    xsaddsp 0, 3, 0
+; FIXPOINT-NEXT:    xsaddsp 1, 4, 0
+; FIXPOINT-NEXT:    blr
   %t0 = fdiv float %x0, %x1
   %t1 = fadd float %x2, %t0
   %t2 = fadd float %x3, %t1
@@ -153,6 +234,19 @@ define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
 }
 
 define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
+; CHECK-LABEL: reassociate_muls1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    fdivs 0, 1, 2
+; CHECK-NEXT:    fmuls 0, 3, 0
+; CHECK-NEXT:    fmuls 1, 4, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_muls1:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsdivsp 0, 1, 2
+; FIXPOINT-NEXT:    xsmulsp 0, 3, 0
+; FIXPOINT-NEXT:    xsmulsp 1, 4, 0
+; FIXPOINT-NEXT:    blr
   %t0 = fdiv float %x0, %x1
   %t1 = fmul float %x2, %t0
   %t2 = fmul float %x3, %t1
@@ -160,6 +254,19 @@ define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
 }
 
 define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) {
+; CHECK-LABEL: reassociate_adds_double:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xsdivdp 0, 1, 2
+; CHECK-NEXT:    xsadddp 0, 3, 0
+; CHECK-NEXT:    xsadddp 1, 4, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_adds_double:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsdivdp 0, 1, 2
+; FIXPOINT-NEXT:    xsadddp 0, 3, 0
+; FIXPOINT-NEXT:    xsadddp 1, 4, 0
+; FIXPOINT-NEXT:    blr
   %t0 = fdiv double %x0, %x1
   %t1 = fadd double %x2, %t0
   %t2 = fadd double %x3, %t1
@@ -167,6 +274,19 @@ define double @reassociate_adds_double(double %x0, double %x1, double %x2, doubl
 }
 
 define double @reassociate_muls_double(double %x0, double %x1, double %x2, double %x3) {
+; CHECK-LABEL: reassociate_muls_double:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    xsdivdp 0, 1, 2
+; CHECK-NEXT:    xsmuldp 0, 3, 0
+; CHECK-NEXT:    xsmuldp 1, 4, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_muls_double:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsdivdp 0, 1, 2
+; FIXPOINT-NEXT:    xsmuldp 0, 3, 0
+; FIXPOINT-NEXT:    xsmuldp 1, 4, 0
+; FIXPOINT-NEXT:    blr
   %t0 = fdiv double %x0, %x1
   %t1 = fmul double %x2, %t0
   %t2 = fmul double %x3, %t1
@@ -174,12 +294,19 @@ define double @reassociate_muls_double(double %x0, double %x1, double %x2, doubl
 }
 
 define i32 @reassociate_mullw(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
+; CHECK-LABEL: reassociate_mullw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    mullw 4, 4, 5
+; CHECK-NEXT:    mullw 3, 3, 6
+; CHECK-NEXT:    mullw 3, 3, 4
+; CHECK-NEXT:    blr
+;
 ; FIXPOINT-LABEL: reassociate_mullw:
 ; FIXPOINT:       # %bb.0:
-; FIXPOINT:       mullw [[REG0:[0-9]+]], 3, 4
-; FIXPOINT:       mullw [[REG1:[0-9]+]], 5, 6
-; FIXPOINT:       mullw 3, [[REG0]], [[REG1]]
-; FIXPOINT-NEXT:  blr
+; FIXPOINT-NEXT:    mullw 3, 3, 4
+; FIXPOINT-NEXT:    mullw 4, 5, 6
+; FIXPOINT-NEXT:    mullw 3, 3, 4
+; FIXPOINT-NEXT:    blr
 
   %t0 = mul i32 %x0, %x1
   %t1 = mul i32 %t0, %x2
@@ -188,12 +315,19 @@ define i32 @reassociate_mullw(i32 %x0, i32 %x1, i32 %x2, i32 %x3) {
 }
 
 define i64 @reassociate_mulld(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
+; CHECK-LABEL: reassociate_mulld:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    mulld 4, 4, 5
+; CHECK-NEXT:    mulld 3, 3, 6
+; CHECK-NEXT:    mulld 3, 3, 4
+; CHECK-NEXT:    blr
+;
 ; FIXPOINT-LABEL: reassociate_mulld:
 ; FIXPOINT:       # %bb.0:
-; FIXPOINT:       mulld [[REG0:[0-9]+]], 3, 4
-; FIXPOINT:       mulld [[REG1:[0-9]+]], 5, 6
-; FIXPOINT:       mulld 3, [[REG0]], [[REG1]]
-; FIXPOINT-NEXT:  blr
+; FIXPOINT-NEXT:    mulld 3, 3, 4
+; FIXPOINT-NEXT:    mulld 4, 5, 6
+; FIXPOINT-NEXT:    mulld 3, 3, 4
+; FIXPOINT-NEXT:    blr
 
   %t0 = mul i64 %x0, %x1
   %t1 = mul i64 %t0, %x2
@@ -204,10 +338,17 @@ define i64 @reassociate_mulld(i64 %x0, i64 %x1, i64 %x2, i64 %x3) {
 define double @reassociate_mamaa_double(double %0, double %1, double %2, double %3, double %4, double %5) {
 ; CHECK-LABEL: reassociate_mamaa_double:
 ; CHECK:       # %bb.0:
-; CHECK-PWR-DAG:   xsmaddadp 1, 6, 5
-; CHECK-PWR-DAG:   xsmaddadp 2, 4, 3
-; CHECK-PWR:       xsadddp 1, 2, 1
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    xsmaddadp 1, 6, 5
+; CHECK-NEXT:    xsmaddadp 2, 4, 3
+; CHECK-NEXT:    xsadddp 1, 2, 1
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_mamaa_double:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsmaddadp 1, 6, 5
+; FIXPOINT-NEXT:    xsmaddadp 2, 4, 3
+; FIXPOINT-NEXT:    xsadddp 1, 2, 1
+; FIXPOINT-NEXT:    blr
   %7 = fmul contract reassoc nsz double %3, %2
   %8 = fmul contract reassoc nsz double %5, %4
   %9 = fadd contract reassoc nsz double %1, %0
@@ -219,10 +360,17 @@ define double @reassociate_mamaa_double(double %0, double %1, double %2, double
 define float @reassociate_mamaa_float(float %0, float %1, float %2, float %3, float %4, float %5) {
 ; CHECK-LABEL: reassociate_mamaa_float:
 ; CHECK:       # %bb.0:
-; CHECK-DAG:   fmadds [[REG0:[0-9]+]], 4, 3, 2
-; CHECK-DAG:   fmadds [[REG1:[0-9]+]], 6, 5, 1
-; CHECK:       fadds 1, [[REG0]], [[REG1]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    fmadds 0, 6, 5, 1
+; CHECK-NEXT:    fmadds 1, 4, 3, 2
+; CHECK-NEXT:    fadds 1, 1, 0
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_mamaa_float:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsmaddasp 1, 6, 5
+; FIXPOINT-NEXT:    xsmaddasp 2, 4, 3
+; FIXPOINT-NEXT:    xsaddsp 1, 2, 1
+; FIXPOINT-NEXT:    blr
   %7 = fmul contract reassoc nsz float %3, %2
   %8 = fmul contract reassoc nsz float %5, %4
   %9 = fadd contract reassoc nsz float %1, %0
@@ -234,10 +382,17 @@ define float @reassociate_mamaa_float(float %0, float %1, float %2, float %3, fl
 define <4 x float> @reassociate_mamaa_vec(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, <4 x float> %5) {
 ; CHECK-LABEL: reassociate_mamaa_vec:
 ; CHECK:       # %bb.0:
-; CHECK-PWR-DAG:   xvmaddasp [[REG0:[0-9]+]], 39, 38
-; CHECK-PWR-DAG:   xvmaddasp [[REG1:[0-9]+]], 37, 36
-; CHECK-PWR:       xvaddsp 34, [[REG1]], [[REG0]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    xvmaddasp 34, 39, 38
+; CHECK-NEXT:    xvmaddasp 35, 37, 36
+; CHECK-NEXT:    xvaddsp 34, 35, 34
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_mamaa_vec:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xvmaddasp 34, 39, 38
+; FIXPOINT-NEXT:    xvmaddasp 35, 37, 36
+; FIXPOINT-NEXT:    xvaddsp 34, 35, 34
+; FIXPOINT-NEXT:    blr
   %7 = fmul contract reassoc nsz <4 x float> %3, %2
   %8 = fmul contract reassoc nsz <4 x float> %5, %4
   %9 = fadd contract reassoc nsz <4 x float> %1, %0
@@ -249,12 +404,21 @@ define <4 x float> @reassociate_mamaa_vec(<4 x float> %0, <4 x float> %1, <4 x f
 define double @reassociate_mamama_double(double %0, double %1, double %2, double %3, double %4, double %5, double %6, double %7, double %8) {
 ; CHECK-LABEL: reassociate_mamama_double:
 ; CHECK:       # %bb.0:
-; CHECK-PWR:       xsmaddadp 7, 2, 1
-; CHECK-PWR-DAG:   xsmuldp [[REG0:[0-9]+]], 4, 3
-; CHECK-PWR-DAG:   xsmaddadp 7, 6, 5
-; CHECK-PWR-DAG:   xsmaddadp [[REG0]], 9, 8
-; CHECK-PWR:       xsadddp 1, 7, [[REG0]]
-; CHECK-NEXT:  blr
+; CHECK-NEXT:    xsmuldp 0, 2, 1
+; CHECK-NEXT:    xsmaddadp 7, 4, 3
+; CHECK-NEXT:    xsmaddadp 0, 6, 5
+; CHECK-NEXT:    xsadddp 1, 7, 0
+; CHECK-NEXT:    xsmaddadp 1, 9, 8
+; CHECK-NEXT:    blr
+;
+; FIXPOINT-LABEL: reassociate_mamama_double:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsmaddadp 7, 2, 1
+; FIXPOINT-NEXT:    xsmuldp 0, 4, 3
+; FIXPOINT-NEXT:    xsmaddadp 7, 6, 5
+; FIXPOINT-NEXT:    xsmaddadp 0, 9, 8
+; FIXPOINT-NEXT:    xsadddp 1, 7, 0
+; FIXPOINT-NEXT:    blr
   %10 = fmul contract reassoc nsz double %1, %0
   %11 = fmul contract reassoc nsz double %3, %2
   %12 = fmul contract reassoc nsz double %5, %4
@@ -267,21 +431,46 @@ define double @reassociate_mamama_double(double %0, double %1, double %2, double
 }
 
 define dso_local float @reassociate_mamama_8(float %0, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8,
-                                             float %9, float %10, float %11, float %12, float %13, float %14, float %15, float %16) {
 ; CHECK-LABEL: reassociate_mamama_8:
 ; CHECK:       # %bb.0:
-; CHECK-DAG:    fmadds [[REG0:[0-9]+]], 3, 2, 1
-; CHECK-DAG:    fmuls  [[REG1:[0-9]+]], 5, 4
-; CHECK-DAG:    fmadds [[REG2:[0-9]+]], 7, 6, [[REG0]]
-; CHECK-DAG:    fmadds [[REG3:[0-9]+]], 9, 8, [[REG1]]
-;
-; CHECK-DAG:    fmadds [[REG4:[0-9]+]], 13, 12, [[REG3]]
-; CHECK-DAG:    fmadds [[REG5:[0-9]+]], 11, 10, [[REG2]]
+; CHECK-NEXT:    fmuls 2, 3, 2
+; CHECK-NEXT:    fmadds 1, 5, 4, 1
+; CHECK-NEXT:    lfs 0, 172(1)
+; CHECK-NEXT:    stfd 29, -24(1) # 8-byte Folded Spill
+; CHECK-NEXT:    stfd 30, -16(1) # 8-byte Folded Spill
+; CHECK-NEXT:    lfs 30, 156(1)
+; CHECK-NEXT:    lfs 29, 164(1)
+; CHECK-NEXT:    fmadds 2, 7, 6, 2
+; CHECK-NEXT:    fmadds 1, 9, 8, 1
+; CHECK-NEXT:    stfd 31, -8(1) # 8-byte Folded Spill
+; CHECK-NEXT:    lfs 31, 180(1)
+; CHECK-NEXT:    fmadds 2, 11, 10, 2
+; CHECK-NEXT:    fmadds 1, 13, 12, 1
+; CHECK-NEXT:    fmadds 2, 29, 30, 2
+; CHECK-NEXT:    lfd 30, -16(1) # 8-byte Folded Reload
+; CHECK-NEXT:    lfd 29, -24(1) # 8-byte Folded Reload
+; CHECK-NEXT:    fadds 1, 1, 2
+; CHECK-NEXT:    fmadds 1, 31, 0, 1
+; CHECK-NEXT:    lfd 31, -8(1) # 8-byte Folded Reload
+; CHECK-NEXT:    blr
 ;
-; CHECK-DAG:    fmadds [[REG6:[0-9]+]], 31, 0, [[REG4]]
-; CHECK-DAG:    fmadds [[REG7:[0-9]+]], 29, 30, [[REG5]]
-; CHECK:        fadds 1, [[REG7]], [[REG6]]
-; CHECK-NEXT:   blr
+; FIXPOINT-LABEL: reassociate_mamama_8:
+; FIXPOINT:       # %bb.0:
+; FIXPOINT-NEXT:    xsmaddasp 1, 3, 2
+; FIXPOINT-NEXT:    xsmulsp 2, 5, 4
+; FIXPOINT-NEXT:    lxssp 2, 180(1)
+; FIXPOINT-NEXT:    lxssp 3, 156(1)
+; FIXPOINT-NEXT:    lxssp 4, 164(1)
+; FIXPOINT-NEXT:    xsmaddasp 1, 7, 6
+; FIXPOINT-NEXT:    xsmaddasp 2, 9, 8
+; FIXPOINT-NEXT:    lfs 0, 172(1)
+; FIXPOINT-NEXT:    xsmaddasp 2, 13, 12
+; FIXPOINT-NEXT:    xsmaddasp 1, 11, 10
+; FIXPOINT-NEXT:    xsmaddasp 2, 34, 0
+; FIXPOINT-NEXT:    xsmaddasp 1, 36, 35
+; FIXPOINT-NEXT:    xsaddsp 1, 1, 2
+; FIXPOINT-NEXT:    blr
+                                             float %9, float %10, float %11, float %12, float %13, float %14, float %15, float %16) {
   %18 = fmul contract reassoc nsz float %2, %1
   %19 = fadd contract reassoc nsz float %18, %0
   %20 = fmul contract reassoc nsz float %4, %3
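
Note: the reassociation exercised throughout machine-combiner.ll is a pure
critical-path (depth) optimization, and MachineTraceMetrics depths are exactly
what the machine combiner compares. A worked sketch of the transform
(schematic, not from the patch; the combiner is free to pick either pairing):

  serial chain, critical path of 3 adds:
    t0 = x0 + x1;  t1 = t0 + x2;  t2 = t1 + x3
  balanced tree, critical path of 2 adds:
    a = x0 + x1;  b = x2 + x3;  t2 = a + b

Both the old hand-written checks and the regenerated ones encode a depth-2
tree; the depth fix only changes which operands get paired, which is why the
checks are regenerated as exact full-body matches.
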
diff --git a/llvm/test/CodeGen/RISCV/machine-trace-metrics-depth-height.ll b/llvm/test/CodeGen/RISCV/machine-trace-metrics-depth-height.ll
index da6c15a5845d55a..613b056d962f881 100644
--- a/llvm/test/CodeGen/RISCV/machine-trace-metrics-depth-height.ll
+++ b/llvm/test/CodeGen/RISCV/machine-trace-metrics-depth-height.ll
@@ -28,31 +28,31 @@ define ptr @mtm_depth_height(ptr %ptr, i64 %size) {
 ; CHECK-NEXT:       0 Instructions
 ; CHECK-NEXT: 0	[[R22:%[0-9]+]]:gpr = COPY [[X11:\$x[0-9]+]]
 ; CHECK-NEXT: 0	[[R21:%[0-9]+]]:gpr = COPY [[X10:\$x[0-9]+]]
-; CHECK-NEXT: 0	[[R24:%[0-9]+]]:gpr = SLTIU [[R21]]:gpr, 1
-; CHECK-NEXT: 0	[[R25:%[0-9]+]]:gpr = SLTIU [[R22]]:gpr, 1
-; CHECK-NEXT: 1	[[R26:%[0-9]+]]:gpr = OR killed [[R24]]:gpr
-; CHECK-NEXT: 2	BEQ killed [[R26]]:gpr, [[X0:\$x[0-9]+]], [[BB1]]
+; CHECK-NEXT: 1	[[R24:%[0-9]+]]:gpr = SLTIU [[R21]]:gpr, 1
+; CHECK-NEXT: 1	[[R25:%[0-9]+]]:gpr = SLTIU [[R22]]:gpr, 1
+; CHECK-NEXT: 2	[[R26:%[0-9]+]]:gpr = OR killed [[R24]]:gpr
+; CHECK-NEXT: 3	BEQ killed [[R26]]:gpr, [[X0:\$x[0-9]+]], [[BB1]]
 ; CHECK-EMPTY:
 ; CHECK-NEXT: Depths for [[BB1]]:
 ; CHECK-NEXT:       4 Instructions
-; CHECK-NEXT: 0	[[R29:%[0-9]+]]:gpr = LUI target-flags(riscv-hi) @heap_ptr
-; CHECK-NEXT: 1	[[R0:%[0-9]+]]:gpr = LD [[R29]]:gpr, target-flags(riscv-lo) @heap_ptr 
-; CHECK-NEXT: 5	[[R30:%[0-9]+]]:gpr = ADD [[R0]]:gpr, [[R22]]:gpr
-; CHECK-NEXT: 0	[[R31:%[0-9]+]]:gpr = LUI target-flags(riscv-hi) @heap_requested
-; CHECK-NEXT: 1	[[R32:%[0-9]+]]:gpr = LD [[R31]]:gpr, target-flags(riscv-lo) @heap_requested 
-; CHECK-NEXT: 5	[[R33:%[0-9]+]]:gpr = ADD killed [[R32]]:gpr, [[R22]]:gpr
-; CHECK-NEXT: 6	[[R34:%[0-9]+]]:gpr = ANDI [[R30]]:gpr, 7
-; CHECK-NEXT: 7	[[R35:%[0-9]+]]:gpr = SLTIU [[R34]]:gpr, 1
-; CHECK-NEXT: 0	[[R36:%[0-9]+]]:gpr = ADDI [[X0]], 8
-; CHECK-NEXT: 7	[[R37:%[0-9]+]]:gpr = nuw nsw SUB killed [[R36]]:gpr, [[R34]]:gpr
-; CHECK-NEXT: 8	[[R38:%[0-9]+]]:gpr = ADDI killed [[R35]]:gpr, -1
-; CHECK-NEXT: 9	[[R39:%[0-9]+]]:gpr = AND killed [[R38]]:gpr, killed [[R37]]:gpr
-; CHECK-NEXT: 10	[[R40:%[0-9]+]]:gpr = ADD killed [[R33]]:gpr, [[R39]]:gpr
-; CHECK-NEXT: 10	[[R1:%[0-9]+]]:gpr = ADD [[R30]]:gpr, [[R39:%[0-9]+]]:gpr
-; CHECK-NEXT: 11	SD killed [[R40]]:gpr, [[R31]]:gpr, target-flags(riscv-lo) @heap_requested 
-; CHECK-NEXT: 0	[[R41:%[0-9]+]]:gpr = LUI target-flags(riscv-hi) @heap_end
-; CHECK-NEXT: 1	[[R42:%[0-9]+]]:gpr = LD killed [[R41]]:gpr, target-flags(riscv-lo) @heap_end 
-; CHECK-NEXT: 11	BGEU killed [[R42]]:gpr, [[R1]]:gpr, [[BB2]]
+; CHECK-NEXT: 1	[[R29:%[0-9]+]]:gpr = LUI target-flags(riscv-hi) @heap_ptr
+; CHECK-NEXT: 2	[[R0:%[0-9]+]]:gpr = LD [[R29]]:gpr, target-flags(riscv-lo) @heap_ptr
+; CHECK-NEXT: 6	[[R30:%[0-9]+]]:gpr = ADD [[R0]]:gpr, [[R22]]:gpr
+; CHECK-NEXT: 1	[[R31:%[0-9]+]]:gpr = LUI target-flags(riscv-hi) @heap_requested
+; CHECK-NEXT: 2	[[R32:%[0-9]+]]:gpr = LD [[R31]]:gpr, target-flags(riscv-lo) @heap_requested
+; CHECK-NEXT: 6	[[R33:%[0-9]+]]:gpr = ADD killed [[R32]]:gpr, [[R22]]:gpr
+; CHECK-NEXT: 7	[[R34:%[0-9]+]]:gpr = ANDI [[R30]]:gpr, 7
+; CHECK-NEXT: 8	[[R35:%[0-9]+]]:gpr = SLTIU [[R34]]:gpr, 1
+; CHECK-NEXT: 1	[[R36:%[0-9]+]]:gpr = ADDI [[X0]], 8
+; CHECK-NEXT: 8	[[R37:%[0-9]+]]:gpr = nuw nsw SUB killed [[R36]]:gpr, [[R34]]:gpr
+; CHECK-NEXT: 9	[[R38:%[0-9]+]]:gpr = ADDI killed [[R35]]:gpr, -1
+; CHECK-NEXT: 10	[[R39:%[0-9]+]]:gpr = AND killed [[R38]]:gpr, killed [[R37]]:gpr
+; CHECK-NEXT: 11	[[R40:%[0-9]+]]:gpr = ADD killed [[R33]]:gpr, [[R39]]:gpr
+; CHECK-NEXT: 11	[[R1:%[0-9]+]]:gpr = ADD [[R30]]:gpr, [[R39:%[0-9]+]]:gpr
+; CHECK-NEXT: 12	SD killed [[R40]]:gpr, [[R31]]:gpr, target-flags(riscv-lo) @heap_requested
+; CHECK-NEXT: 1	[[R41:%[0-9]+]]:gpr = LUI target-flags(riscv-hi) @heap_end
+; CHECK-NEXT: 2	[[R42:%[0-9]+]]:gpr = LD killed [[R41]]:gpr, target-flags(riscv-lo) @heap_end
+; CHECK-NEXT: 12	BGEU killed [[R42]]:gpr, [[R1]]:gpr, [[BB2]]
 ; CHECK-NEXT: Heights for [[BB10]]:
 ; CHECK-NEXT:       1 Instructions
 ; CHECK-NEXT: 0	PseudoRET implicit [[X10]]
@@ -68,26 +68,26 @@ define ptr @mtm_depth_height(ptr %ptr, i64 %size) {
 ; CHECK-NEXT: [[BB14]] Live-ins: X0 at 0
 ; CHECK-NEXT: Heights for [[BB1]]:
 ; CHECK-NEXT:      20 Instructions
-; CHECK-NEXT: 11	0	BGEU killed [[R42]]:gpr, [[R1]]:gpr, [[BB2]]
-; CHECK-NEXT: 11	4	[[R42]]:gpr = LD killed [[R41]]:gpr, target-flags(riscv-lo) @heap_end 
-; CHECK-NEXT: 11	5	[[R41]]:gpr = LUI target-flags(riscv-hi) @heap_end
-; CHECK-NEXT: 11	0	SD killed [[R40]]:gpr, [[R31]]:gpr, target-flags(riscv-lo) @heap_requested 
-; CHECK-NEXT: 11	1	[[R1]]:gpr = ADD [[R30]]:gpr, [[R39]]:gpr
-; CHECK-NEXT: 11	1	[[R40]]:gpr = ADD killed [[R33]]:gpr, [[R39]]:gpr
-; CHECK-NEXT: 11	2	[[R39]]:gpr = AND killed [[R38]]:gpr, killed [[R37]]:gpr
-; CHECK-NEXT: 11	3	[[R38]]:gpr = ADDI killed [[R35]]:gpr, -1
-; CHECK-NEXT: 11	3	[[R37]]:gpr = nuw nsw SUB killed [[R36]]:gpr, [[R34]]:gpr
-; CHECK-NEXT: 11	4	[[R36]]:gpr = ADDI [[X0]], 8
-; CHECK-NEXT: 11	4	[[R35]]:gpr = SLTIU [[R34]]:gpr, 1
-; CHECK-NEXT: 11	5	[[R34]]:gpr = ANDI [[R30]]:gpr, 7
-; CHECK-NEXT: 11	2	[[R33]]:gpr = ADD killed [[R32]]:gpr, [[R22]]:gpr
-; CHECK-NEXT: 11	6	[[R32]]:gpr = LD [[R31]]:gpr, target-flags(riscv-lo) @heap_requested 
-; CHECK-NEXT: 11	7	[[R31]]:gpr = LUI target-flags(riscv-hi) @heap_requested
-; CHECK-NEXT: 11	6	[[R30]]:gpr = ADD [[R0]]:gpr, [[R22]]:gpr
-; CHECK-NEXT: 11	10	[[R0]]:gpr = LD [[R29]]:gpr, target-flags(riscv-lo) @heap_ptr 
-; CHECK-NEXT: 11	11	[[R29]]:gpr = LUI target-flags(riscv-hi) @heap_ptr
+; CHECK-NEXT: 12	0	BGEU killed [[R42]]:gpr, [[R1]]:gpr, [[BB2]]
+; CHECK-NEXT: 12	4	[[R42]]:gpr = LD killed [[R41]]:gpr, target-flags(riscv-lo) @heap_end
+; CHECK-NEXT: 12	5	[[R41]]:gpr = LUI target-flags(riscv-hi) @heap_end
+; CHECK-NEXT: 12	0	SD killed [[R40]]:gpr, [[R31]]:gpr, target-flags(riscv-lo) @heap_requested
+; CHECK-NEXT: 12	1	[[R1]]:gpr = ADD [[R30]]:gpr, [[R39]]:gpr
+; CHECK-NEXT: 12	1	[[R40]]:gpr = ADD killed [[R33]]:gpr, [[R39]]:gpr
+; CHECK-NEXT: 12	2	[[R39]]:gpr = AND killed [[R38]]:gpr, killed [[R37]]:gpr
+; CHECK-NEXT: 12	3	[[R38]]:gpr = ADDI killed [[R35]]:gpr, -1
+; CHECK-NEXT: 12	3	[[R37]]:gpr = nuw nsw SUB killed [[R36]]:gpr, [[R34]]:gpr
+; CHECK-NEXT: 12	4	[[R36]]:gpr = ADDI [[X0]], 8
+; CHECK-NEXT: 12	4	[[R35]]:gpr = SLTIU [[R34]]:gpr, 1
+; CHECK-NEXT: 12	5	[[R34]]:gpr = ANDI [[R30]]:gpr, 7
+; CHECK-NEXT: 12	2	[[R33]]:gpr = ADD killed [[R32]]:gpr, [[R22]]:gpr
+; CHECK-NEXT: 12	6	[[R32]]:gpr = LD [[R31]]:gpr, target-flags(riscv-lo) @heap_requested
+; CHECK-NEXT: 12	7	[[R31]]:gpr = LUI target-flags(riscv-hi) @heap_requested
+; CHECK-NEXT: 12	6	[[R30]]:gpr = ADD [[R0]]:gpr, [[R22]]:gpr
+; CHECK-NEXT: 12	10	[[R0]]:gpr = LD [[R29]]:gpr, target-flags(riscv-lo) @heap_ptr
+; CHECK-NEXT: 12	11	[[R29]]:gpr = LUI target-flags(riscv-hi) @heap_ptr
 ; CHECK-NEXT: [[BB1]] Live-ins: [[R22]]@6 X0 at 4
-; CHECK-NEXT: Critical path: 11
+; CHECK-NEXT: Critical path: 12
 entry:
   %ptrint = ptrtoint ptr %ptr to i64
   %cmp = icmp eq ptr %ptr, null
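The updated depths above all follow one pattern: the COPYs stay at depth 0, but an instruction whose only inputs are such zero-latency (transient) COPYs, or that has no register inputs at all (the LUIs), now starts at its own latency instead of 0, and every depth downstream of it shifts by the same amount. As a minimal sketch that reproduces the numbers in this test (computeDepth, Dep, and the field names are illustrative stand-ins, not the actual MachineTraceMetrics interfaces):

  #include <algorithm>
  #include <vector>

  struct Dep {
    unsigned DefDepth;   // depth already computed for the defining instr
    unsigned DefLatency; // latency of the defining instr
    bool IsTransient;    // e.g. a COPY expected to be coalesced away
  };

  // Depth of an instruction, given its data deps and its own latency.
  unsigned computeDepth(const std::vector<Dep> &Deps, unsigned OwnLatency) {
    unsigned Depth = 0;
    bool AllTransient = true;
    for (const Dep &D : Deps) {
      Depth = std::max(Depth, D.DefDepth + D.DefLatency);
      AllTransient = AllTransient && D.IsTransient;
    }
    // With no dependencies, or only transient ones, count the
    // instruction's own latency instead of reporting a depth of 0.
    if (Deps.empty() || AllTransient)
      Depth = std::max(Depth, OwnLatency);
    return Depth;
  }

For the first SLTIU, the only dependency is a transient COPY at depth 0, so computeDepth({{0, 0, true}}, 1) yields 1 instead of 0; the OR that consumes it lands at 1 + 1 = 2 and the BEQ at 3, exactly as the updated CHECK lines read.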
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
index f3570495600f3c3..2d5eeb5ea577f37 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
@@ -518,15 +518,15 @@ define i32 @explode_16xi32(<16 x i32> %v) {
 ; RV32-NEXT:    add a2, a2, a3
 ; RV32-NEXT:    add a2, a2, a4
 ; RV32-NEXT:    add a0, a0, a2
-; RV32-NEXT:    add a5, a5, a6
 ; RV32-NEXT:    add a0, a0, a5
+; RV32-NEXT:    add a0, a0, a6
 ; RV32-NEXT:    add a7, a7, t0
-; RV32-NEXT:    add a7, a7, t1
 ; RV32-NEXT:    add a0, a0, a7
-; RV32-NEXT:    add t2, t2, t3
-; RV32-NEXT:    add t2, t2, t4
-; RV32-NEXT:    add t2, t2, t5
-; RV32-NEXT:    add a0, a0, t2
+; RV32-NEXT:    add t1, t1, t2
+; RV32-NEXT:    add t1, t1, t3
+; RV32-NEXT:    add a0, a0, t1
+; RV32-NEXT:    add t4, t4, t5
+; RV32-NEXT:    add a0, a0, t4
 ; RV32-NEXT:    addi sp, s0, -128
 ; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
@@ -578,15 +578,15 @@ define i32 @explode_16xi32(<16 x i32> %v) {
 ; RV64-NEXT:    add a2, a2, a3
 ; RV64-NEXT:    add a2, a2, a4
 ; RV64-NEXT:    add a0, a0, a2
-; RV64-NEXT:    add a5, a5, a6
 ; RV64-NEXT:    add a0, a0, a5
+; RV64-NEXT:    add a0, a0, a6
 ; RV64-NEXT:    add a7, a7, t0
-; RV64-NEXT:    add a7, a7, t1
 ; RV64-NEXT:    add a0, a0, a7
-; RV64-NEXT:    add t2, t2, t3
-; RV64-NEXT:    add t2, t2, t4
-; RV64-NEXT:    add t2, t2, t5
-; RV64-NEXT:    addw a0, a0, t2
+; RV64-NEXT:    add t1, t1, t2
+; RV64-NEXT:    add t1, t1, t3
+; RV64-NEXT:    add a0, a0, t1
+; RV64-NEXT:    add t4, t4, t5
+; RV64-NEXT:    addw a0, a0, t4
 ; RV64-NEXT:    addi sp, s0, -128
 ; RV64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
 ; RV64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index f52ba6f51d5c897..6d7b7b648162a27 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -545,8 +545,8 @@ define void @reverse_strided_constant_neg_4xv2f32(ptr %x, ptr %z, i64 %s) {
 define void @reverse_strided_runtime_4xv2f32(ptr %x, ptr %z, i64 %s) {
 ; CHECK-LABEL: reverse_strided_runtime_4xv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    add a0, a0, a2
 ; CHECK-NEXT:    add a3, a2, a2
+; CHECK-NEXT:    add a0, a0, a2
 ; CHECK-NEXT:    add a0, a0, a3
 ; CHECK-NEXT:    neg a2, a2
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
diff --git a/llvm/test/CodeGen/X86/alias-static-alloca.ll b/llvm/test/CodeGen/X86/alias-static-alloca.ll
index f4a9e4b0df5761c..3c895ae80410282 100644
--- a/llvm/test/CodeGen/X86/alias-static-alloca.ll
+++ b/llvm/test/CodeGen/X86/alias-static-alloca.ll
@@ -15,9 +15,9 @@ define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) {
 ; CHECK-NEXT:    movl %ecx, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movl %edi, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT:    movl %edx, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT:    addl %edi, %esi
-; CHECK-NEXT:    leal (%rdx,%rcx), %eax
-; CHECK-NEXT:    addl %esi, %eax
+; CHECK-NEXT:    leal (%rsi,%rdx), %eax
+; CHECK-NEXT:    addl %edi, %ecx
+; CHECK-NEXT:    addl %ecx, %eax
 ; CHECK-NEXT:    retq
 entry:
   %a0 = alloca i32
diff --git a/llvm/test/CodeGen/X86/avx-vinsertf128.ll b/llvm/test/CodeGen/X86/avx-vinsertf128.ll
index 9a52ef3293d2e38..7bb874c493a3fcd 100644
--- a/llvm/test/CodeGen/X86/avx-vinsertf128.ll
+++ b/llvm/test/CodeGen/X86/avx-vinsertf128.ll
@@ -59,13 +59,13 @@ define <4 x i32> @DAGCombineA(<4 x i32> %v1) nounwind readonly {
 define <8 x i32> @DAGCombineB(<8 x i32> %v1, <8 x i32> %v2) nounwind readonly {
 ; CHECK-LABEL: DAGCombineB:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm2
-; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm1
+; CHECK-NEXT:    vextractf128 $1, %ymm1, %xmm2
 ; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm3
-; CHECK-NEXT:    vpaddd %xmm1, %xmm3, %xmm1
-; CHECK-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
-; CHECK-NEXT:    vpaddd %xmm0, %xmm2, %xmm0
-; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
+; CHECK-NEXT:    vpaddd %xmm0, %xmm1, %xmm1
+; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %t1 = add <8 x i32> %v1, %v2
   %t2 = add <8 x i32> %t1, %v1
diff --git a/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll b/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll
index 34a205a7baa8641..140b345081f4152 100644
--- a/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll
+++ b/llvm/test/CodeGen/X86/avx512-regcall-Mask.ll
@@ -35,9 +35,9 @@ define dso_local x86_regcallcc i64 @test_argv64i1(<64 x i1> %x0, <64 x i1> %x1,
 ;
 ; WIN64-LABEL: test_argv64i1:
 ; WIN64:       # %bb.0:
-; WIN64-NEXT:    addq %rcx, %rax
-; WIN64-NEXT:    addq %rdx, %rax
+; WIN64-NEXT:    addq %rdx, %rcx
 ; WIN64-NEXT:    addq %rdi, %rax
+; WIN64-NEXT:    addq %rcx, %rax
 ; WIN64-NEXT:    leaq (%rsi,%r8), %rcx
 ; WIN64-NEXT:    addq %r9, %rcx
 ; WIN64-NEXT:    addq %rcx, %rax
@@ -51,9 +51,9 @@ define dso_local x86_regcallcc i64 @test_argv64i1(<64 x i1> %x0, <64 x i1> %x1,
 ;
 ; LINUXOSX64-LABEL: test_argv64i1:
 ; LINUXOSX64:       # %bb.0:
-; LINUXOSX64-NEXT:    addq %rcx, %rax
-; LINUXOSX64-NEXT:    addq %rdx, %rax
+; LINUXOSX64-NEXT:    addq %rdx, %rcx
 ; LINUXOSX64-NEXT:    addq %rdi, %rax
+; LINUXOSX64-NEXT:    addq %rcx, %rax
 ; LINUXOSX64-NEXT:    leaq (%rsi,%r8), %rcx
 ; LINUXOSX64-NEXT:    addq %r9, %rcx
 ; LINUXOSX64-NEXT:    addq %rcx, %rax
diff --git a/llvm/test/CodeGen/X86/avx512vnni-combine.ll b/llvm/test/CodeGen/X86/avx512vnni-combine.ll
index f0c8a7e2083261f..498c2f96d2e3c5d 100644
--- a/llvm/test/CodeGen/X86/avx512vnni-combine.ll
+++ b/llvm/test/CodeGen/X86/avx512vnni-combine.ll
@@ -4,7 +4,8 @@
 define <8 x i64> @foo_reg_512(<8 x i64> %0, <8 x i64> %1, <8 x i64> %2, <8 x i64> %3, <8 x i64> %4, <8 x i64> %5) {
 ; CHECK-LABEL: foo_reg_512:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpdpwssd %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpmaddwd %zmm2, %zmm1, %zmm2
+; CHECK-NEXT:    vpaddd %zmm2, %zmm0, %zmm0
 ; CHECK-NEXT:    vpmaddwd %zmm3, %zmm1, %zmm2
 ; CHECK-NEXT:    vpaddd %zmm2, %zmm0, %zmm0
 ; CHECK-NEXT:    vpmaddwd %zmm4, %zmm1, %zmm2
@@ -56,7 +57,8 @@ define <8 x i64> @foo_512(i32 %0, <8 x i64> %1, <8 x i64> %2, ptr %3) {
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB1_8: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vpdpwssd -192(%rdi), %zmm1, %zmm0
+; CHECK-NEXT:    vpmaddwd -192(%rdi), %zmm1, %zmm2
+; CHECK-NEXT:    vpaddd %zmm2, %zmm0, %zmm0
 ; CHECK-NEXT:    vpmaddwd -128(%rdi), %zmm1, %zmm2
 ; CHECK-NEXT:    vpaddd %zmm2, %zmm0, %zmm0
 ; CHECK-NEXT:    vpmaddwd -64(%rdi), %zmm1, %zmm2
@@ -77,7 +79,8 @@ define <8 x i64> @foo_512(i32 %0, <8 x i64> %1, <8 x i64> %2, ptr %3) {
 ; CHECK-NEXT:    xorl %ecx, %ecx
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB1_5: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vpdpwssd (%rsi,%rcx), %zmm1, %zmm0
+; CHECK-NEXT:    vpmaddwd (%rsi,%rcx), %zmm1, %zmm2
+; CHECK-NEXT:    vpaddd %zmm2, %zmm0, %zmm0
 ; CHECK-NEXT:    addq $64, %rcx
 ; CHECK-NEXT:    cmpq %rcx, %rax
 ; CHECK-NEXT:    jne .LBB1_5
diff --git a/llvm/test/CodeGen/X86/avx512vnni.ll b/llvm/test/CodeGen/X86/avx512vnni.ll
index 8f954370015a426..67caf89dbcee001 100644
--- a/llvm/test/CodeGen/X86/avx512vnni.ll
+++ b/llvm/test/CodeGen/X86/avx512vnni.ll
@@ -4,7 +4,8 @@
 define <16 x i32> @test_pmaddwd_v32i16_add_v16i32(<16 x i32> %a0, <32 x i16> %a1, <32 x i16> %a2) {
 ; CHECK-LABEL: test_pmaddwd_v32i16_add_v16i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpdpwssd %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpmaddwd %zmm2, %zmm1, %zmm1
+; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %1 = call <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16> %a1, <32 x i16> %a2)
   %2 = add <16 x i32> %1, %a0
@@ -14,7 +15,8 @@ define <16 x i32> @test_pmaddwd_v32i16_add_v16i32(<16 x i32> %a0, <32 x i16> %a1
 define <16 x i32> @test_pmaddwd_v32i16_add_v16i32_commute(<16 x i32> %a0, <32 x i16> %a1, <32 x i16> %a2) {
 ; CHECK-LABEL: test_pmaddwd_v32i16_add_v16i32_commute:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpdpwssd %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    vpmaddwd %zmm2, %zmm1, %zmm1
+; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %1 = call <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16> %a1, <32 x i16> %a2)
   %2 = add <16 x i32> %a0, %1
@@ -24,7 +26,8 @@ define <16 x i32> @test_pmaddwd_v32i16_add_v16i32_commute(<16 x i32> %a0, <32 x
 define <16 x i32> @test_pmaddwd_v32i16_add_v16i32_load1(<16 x i32> %a0, ptr %p1, <32 x i16> %a2) {
 ; CHECK-LABEL: test_pmaddwd_v32i16_add_v16i32_load1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpdpwssd (%rdi), %zmm1, %zmm0
+; CHECK-NEXT:    vpmaddwd (%rdi), %zmm1, %zmm1
+; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %a1 = load <32 x i16>, ptr %p1
   %1 = call <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16> %a1, <32 x i16> %a2)
@@ -35,7 +38,8 @@ define <16 x i32> @test_pmaddwd_v32i16_add_v16i32_load1(<16 x i32> %a0, ptr %p1,
 define <16 x i32> @test_pmaddwd_v32i16_add_v16i32_load2(<16 x i32> %a0, <32 x i16> %a1, ptr %p2) {
 ; CHECK-LABEL: test_pmaddwd_v32i16_add_v16i32_load2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpdpwssd (%rdi), %zmm1, %zmm0
+; CHECK-NEXT:    vpmaddwd (%rdi), %zmm1, %zmm1
+; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %a2 = load <32 x i16>, ptr %p2
   %1 = call <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16> %a1, <32 x i16> %a2)
@@ -46,7 +50,8 @@ define <16 x i32> @test_pmaddwd_v32i16_add_v16i32_load2(<16 x i32> %a0, <32 x i1
 define <16 x i32> @test_pmaddwd_v32i16_add_v16i32_commute_load1(<16 x i32> %a0, ptr %p1, <32 x i16> %a2) {
 ; CHECK-LABEL: test_pmaddwd_v32i16_add_v16i32_commute_load1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpdpwssd (%rdi), %zmm1, %zmm0
+; CHECK-NEXT:    vpmaddwd (%rdi), %zmm1, %zmm1
+; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %a1 = load <32 x i16>, ptr %p1
   %1 = call <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16> %a1, <32 x i16> %a2)
@@ -57,7 +62,8 @@ define <16 x i32> @test_pmaddwd_v32i16_add_v16i32_commute_load1(<16 x i32> %a0,
 define <16 x i32> @test_pmaddwd_v32i16_add_v16i32_commute_load2(<16 x i32> %a0, <32 x i16> %a1, ptr %p2) {
 ; CHECK-LABEL: test_pmaddwd_v32i16_add_v16i32_commute_load2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpdpwssd (%rdi), %zmm1, %zmm0
+; CHECK-NEXT:    vpmaddwd (%rdi), %zmm1, %zmm1
+; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %a2 = load <32 x i16>, ptr %p2
   %1 = call <16 x i32> @llvm.x86.avx512.pmaddw.d.512(<32 x i16> %a1, <32 x i16> %a2)
diff --git a/llvm/test/CodeGen/X86/avx_vnni-intrinsics.ll b/llvm/test/CodeGen/X86/avx_vnni-intrinsics.ll
index a1db6e54fa7969b..b52a73fbf96f75b 100644
--- a/llvm/test/CodeGen/X86/avx_vnni-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx_vnni-intrinsics.ll
@@ -73,7 +73,8 @@ declare <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32>, <8 x i32>, <8 x i32>)
 define <8 x i32>@test_int_x86_avx_vpdpwssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
 ; AVXVNNI-LABEL: test_int_x86_avx_vpdpwssd_256:
 ; AVXVNNI:       # %bb.0:
-; AVXVNNI-NEXT:    {vex} vpdpwssd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x75,0x52,0xc2]
+; AVXVNNI-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1 # encoding: [0xc5,0xf5,0xf5,0xca]
+; AVXVNNI-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
 ; AVXVNNI-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VNNI-LABEL: test_int_x86_avx_vpdpwssd_256:
@@ -89,7 +90,8 @@ declare <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32>, <4 x i32>, <4 x i32>)
 define <4 x i32>@test_int_x86_avx_vpdpwssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
 ; AVXVNNI-LABEL: test_int_x86_avx_vpdpwssd_128:
 ; AVXVNNI:       # %bb.0:
-; AVXVNNI-NEXT:    {vex} vpdpwssd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x71,0x52,0xc2]
+; AVXVNNI-NEXT:    vpmaddwd %xmm2, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0xf5,0xca]
+; AVXVNNI-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
 ; AVXVNNI-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
 ;
 ; AVX512VNNI-LABEL: test_int_x86_avx_vpdpwssd_128:
diff --git a/llvm/test/CodeGen/X86/avxvnni-combine.ll b/llvm/test/CodeGen/X86/avxvnni-combine.ll
index 75e29df9f34acf8..ef22ecd0d825832 100644
--- a/llvm/test/CodeGen/X86/avxvnni-combine.ll
+++ b/llvm/test/CodeGen/X86/avxvnni-combine.ll
@@ -6,7 +6,8 @@
 define <2 x i64> @foo_reg_128(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2, <2 x i64> %3, <2 x i64> %4, <2 x i64> %5) {
 ; AVX-LABEL: foo_reg_128:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    vpmaddwd %xmm2, %xmm1, %xmm2
+; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vpmaddwd %xmm3, %xmm1, %xmm2
 ; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vpmaddwd %xmm4, %xmm1, %xmm2
@@ -17,7 +18,8 @@ define <2 x i64> @foo_reg_128(<2 x i64> %0, <2 x i64> %1, <2 x i64> %2, <2 x i64
 ;
 ; AVX512-LABEL: foo_reg_128:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    vpmaddwd %xmm2, %xmm1, %xmm2
+; AVX512-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vpmaddwd %xmm3, %xmm1, %xmm2
 ; AVX512-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vpmaddwd %xmm4, %xmm1, %xmm2
@@ -61,7 +63,8 @@ define <2 x i64> @foo_128(i32 %0, <2 x i64> %1, <2 x i64> %2, ptr %3) {
 ; AVX-NEXT:    xorl %ecx, %ecx
 ; AVX-NEXT:    .p2align 4, 0x90
 ; AVX-NEXT:  .LBB1_8: # =>This Inner Loop Header: Depth=1
-; AVX-NEXT:    {vex} vpdpwssd -48(%rdi), %xmm1, %xmm0
+; AVX-NEXT:    vpmaddwd -48(%rdi), %xmm1, %xmm2
+; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vpmaddwd -32(%rdi), %xmm1, %xmm2
 ; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vpmaddwd -16(%rdi), %xmm1, %xmm2
@@ -82,7 +85,8 @@ define <2 x i64> @foo_128(i32 %0, <2 x i64> %1, <2 x i64> %2, ptr %3) {
 ; AVX-NEXT:    xorl %ecx, %ecx
 ; AVX-NEXT:    .p2align 4, 0x90
 ; AVX-NEXT:  .LBB1_5: # =>This Inner Loop Header: Depth=1
-; AVX-NEXT:    {vex} vpdpwssd (%rsi,%rcx), %xmm1, %xmm0
+; AVX-NEXT:    vpmaddwd (%rsi,%rcx), %xmm1, %xmm2
+; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    addq $16, %rcx
 ; AVX-NEXT:    cmpq %rcx, %rax
 ; AVX-NEXT:    jne .LBB1_5
@@ -108,7 +112,8 @@ define <2 x i64> @foo_128(i32 %0, <2 x i64> %1, <2 x i64> %2, ptr %3) {
 ; AVX512-NEXT:    xorl %ecx, %ecx
 ; AVX512-NEXT:    .p2align 4, 0x90
 ; AVX512-NEXT:  .LBB1_8: # =>This Inner Loop Header: Depth=1
-; AVX512-NEXT:    vpdpwssd -48(%rdi), %xmm1, %xmm0
+; AVX512-NEXT:    vpmaddwd -48(%rdi), %xmm1, %xmm2
+; AVX512-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vpmaddwd -32(%rdi), %xmm1, %xmm2
 ; AVX512-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    vpmaddwd -16(%rdi), %xmm1, %xmm2
@@ -129,7 +134,8 @@ define <2 x i64> @foo_128(i32 %0, <2 x i64> %1, <2 x i64> %2, ptr %3) {
 ; AVX512-NEXT:    xorl %ecx, %ecx
 ; AVX512-NEXT:    .p2align 4, 0x90
 ; AVX512-NEXT:  .LBB1_5: # =>This Inner Loop Header: Depth=1
-; AVX512-NEXT:    vpdpwssd (%rsi,%rcx), %xmm1, %xmm0
+; AVX512-NEXT:    vpmaddwd (%rsi,%rcx), %xmm1, %xmm2
+; AVX512-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT:    addq $16, %rcx
 ; AVX512-NEXT:    cmpq %rcx, %rax
 ; AVX512-NEXT:    jne .LBB1_5
@@ -346,7 +352,8 @@ declare <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16>, <8 x i16>) #1
 define <4 x i64> @foo_reg_256(<4 x i64> %0, <4 x i64> %1, <4 x i64> %2, <4 x i64> %3, <4 x i64> %4, <4 x i64> %5) {
 ; AVX-LABEL: foo_reg_256:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm2
+; AVX-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vpmaddwd %ymm3, %ymm1, %ymm2
 ; AVX-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vpmaddwd %ymm4, %ymm1, %ymm2
@@ -357,7 +364,8 @@ define <4 x i64> @foo_reg_256(<4 x i64> %0, <4 x i64> %1, <4 x i64> %2, <4 x i64
 ;
 ; AVX512-LABEL: foo_reg_256:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm2
+; AVX512-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmaddwd %ymm3, %ymm1, %ymm2
 ; AVX512-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmaddwd %ymm4, %ymm1, %ymm2
@@ -408,7 +416,8 @@ define <4 x i64> @foo_256(i32 %0, <4 x i64> %1, <4 x i64> %2, ptr %3) {
 ; AVX-NEXT:    xorl %ecx, %ecx
 ; AVX-NEXT:    .p2align 4, 0x90
 ; AVX-NEXT:  .LBB4_8: # =>This Inner Loop Header: Depth=1
-; AVX-NEXT:    {vex} vpdpwssd -96(%rdi), %ymm1, %ymm0
+; AVX-NEXT:    vpmaddwd -96(%rdi), %ymm1, %ymm2
+; AVX-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vpmaddwd -64(%rdi), %ymm1, %ymm2
 ; AVX-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    vpmaddwd -32(%rdi), %ymm1, %ymm2
@@ -429,7 +438,8 @@ define <4 x i64> @foo_256(i32 %0, <4 x i64> %1, <4 x i64> %2, ptr %3) {
 ; AVX-NEXT:    xorl %ecx, %ecx
 ; AVX-NEXT:    .p2align 4, 0x90
 ; AVX-NEXT:  .LBB4_5: # =>This Inner Loop Header: Depth=1
-; AVX-NEXT:    {vex} vpdpwssd (%rsi,%rcx), %ymm1, %ymm0
+; AVX-NEXT:    vpmaddwd (%rsi,%rcx), %ymm1, %ymm2
+; AVX-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX-NEXT:    addq $32, %rcx
 ; AVX-NEXT:    cmpq %rcx, %rax
 ; AVX-NEXT:    jne .LBB4_5
@@ -455,7 +465,8 @@ define <4 x i64> @foo_256(i32 %0, <4 x i64> %1, <4 x i64> %2, ptr %3) {
 ; AVX512-NEXT:    xorl %ecx, %ecx
 ; AVX512-NEXT:    .p2align 4, 0x90
 ; AVX512-NEXT:  .LBB4_8: # =>This Inner Loop Header: Depth=1
-; AVX512-NEXT:    vpdpwssd -96(%rdi), %ymm1, %ymm0
+; AVX512-NEXT:    vpmaddwd -96(%rdi), %ymm1, %ymm2
+; AVX512-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmaddwd -64(%rdi), %ymm1, %ymm2
 ; AVX512-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX512-NEXT:    vpmaddwd -32(%rdi), %ymm1, %ymm2
@@ -476,7 +487,8 @@ define <4 x i64> @foo_256(i32 %0, <4 x i64> %1, <4 x i64> %2, ptr %3) {
 ; AVX512-NEXT:    xorl %ecx, %ecx
 ; AVX512-NEXT:    .p2align 4, 0x90
 ; AVX512-NEXT:  .LBB4_5: # =>This Inner Loop Header: Depth=1
-; AVX512-NEXT:    vpdpwssd (%rsi,%rcx), %ymm1, %ymm0
+; AVX512-NEXT:    vpmaddwd (%rsi,%rcx), %ymm1, %ymm2
+; AVX512-NEXT:    vpaddd %ymm2, %ymm0, %ymm0
 ; AVX512-NEXT:    addq $32, %rcx
 ; AVX512-NEXT:    cmpq %rcx, %rax
 ; AVX512-NEXT:    jne .LBB4_5
diff --git a/llvm/test/CodeGen/X86/avxvnni.ll b/llvm/test/CodeGen/X86/avxvnni.ll
index ea9f9100543e92d..4f3ff6ea0e9de24 100644
--- a/llvm/test/CodeGen/X86/avxvnni.ll
+++ b/llvm/test/CodeGen/X86/avxvnni.ll
@@ -6,12 +6,14 @@
 define <4 x i32> @test_pmaddwd_v8i16_add_v4i32(<4 x i32> %a0, <8 x i16> %a1, <8 x i16> %a2) {
 ; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    vpmaddwd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    vpmaddwd %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
   %2 = add <4 x i32> %1, %a0
@@ -21,12 +23,14 @@ define <4 x i32> @test_pmaddwd_v8i16_add_v4i32(<4 x i32> %a0, <8 x i16> %a1, <8
 define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_commute(<4 x i32> %a0, <8 x i16> %a1, <8 x i16> %a2) {
 ; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32_commute:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd %xmm2, %xmm1, %xmm0
+; AVX-NEXT:    vpmaddwd %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32_commute:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd %xmm2, %xmm1, %xmm0
+; AVX512-NEXT:    vpmaddwd %xmm2, %xmm1, %xmm1
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
   %2 = add <4 x i32> %a0, %1
@@ -36,12 +40,14 @@ define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_commute(<4 x i32> %a0, <8 x i16>
 define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_load1(<4 x i32> %a0, ptr %p1, <8 x i16> %a2) {
 ; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32_load1:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd (%rdi), %xmm1, %xmm0
+; AVX-NEXT:    vpmaddwd (%rdi), %xmm1, %xmm1
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32_load1:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd (%rdi), %xmm1, %xmm0
+; AVX512-NEXT:    vpmaddwd (%rdi), %xmm1, %xmm1
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %a1 = load <8 x i16>, ptr %p1
   %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
@@ -52,12 +58,14 @@ define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_load1(<4 x i32> %a0, ptr %p1, <8
 define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_load2(<4 x i32> %a0, <8 x i16> %a1, ptr %p2) {
 ; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32_load2:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd (%rdi), %xmm1, %xmm0
+; AVX-NEXT:    vpmaddwd (%rdi), %xmm1, %xmm1
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32_load2:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd (%rdi), %xmm1, %xmm0
+; AVX512-NEXT:    vpmaddwd (%rdi), %xmm1, %xmm1
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %a2 = load <8 x i16>, ptr %p2
   %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
@@ -68,12 +76,14 @@ define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_load2(<4 x i32> %a0, <8 x i16> %a
 define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_commute_load1(<4 x i32> %a0, ptr %p1, <8 x i16> %a2) {
 ; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32_commute_load1:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd (%rdi), %xmm1, %xmm0
+; AVX-NEXT:    vpmaddwd (%rdi), %xmm1, %xmm1
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32_commute_load1:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd (%rdi), %xmm1, %xmm0
+; AVX512-NEXT:    vpmaddwd (%rdi), %xmm1, %xmm1
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %a1 = load <8 x i16>, ptr %p1
   %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
@@ -84,12 +94,14 @@ define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_commute_load1(<4 x i32> %a0, ptr
 define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_commute_load2(<4 x i32> %a0, <8 x i16> %a1, ptr %p2) {
 ; AVX-LABEL: test_pmaddwd_v8i16_add_v4i32_commute_load2:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd (%rdi), %xmm1, %xmm0
+; AVX-NEXT:    vpmaddwd (%rdi), %xmm1, %xmm1
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v8i16_add_v4i32_commute_load2:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd (%rdi), %xmm1, %xmm0
+; AVX512-NEXT:    vpmaddwd (%rdi), %xmm1, %xmm1
+; AVX512-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %a2 = load <8 x i16>, ptr %p2
   %1 = call <4 x i32> @llvm.x86.sse2.pmadd.wd(<8 x i16> %a1, <8 x i16> %a2)
@@ -100,12 +112,14 @@ define <4 x i32> @test_pmaddwd_v8i16_add_v4i32_commute_load2(<4 x i32> %a0, <8 x
 define <8 x i32> @test_pmaddwd_v16i16_add_v8i32(<8 x i32> %a0, <16 x i16> %a1, <16 x i16> %a2) {
 ; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
   %2 = add <8 x i32> %1, %a0
@@ -115,12 +129,14 @@ define <8 x i32> @test_pmaddwd_v16i16_add_v8i32(<8 x i32> %a0, <16 x i16> %a1, <
 define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_commute(<8 x i32> %a0, <16 x i16> %a1, <16 x i16> %a2) {
 ; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32_commute:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd %ymm2, %ymm1, %ymm0
+; AVX-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32_commute:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd %ymm2, %ymm1, %ymm0
+; AVX512-NEXT:    vpmaddwd %ymm2, %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
   %2 = add <8 x i32> %a0, %1
@@ -130,12 +146,14 @@ define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_commute(<8 x i32> %a0, <16 x i16
 define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_load1(<8 x i32> %a0, ptr %p1, <16 x i16> %a2) {
 ; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32_load1:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd (%rdi), %ymm1, %ymm0
+; AVX-NEXT:    vpmaddwd (%rdi), %ymm1, %ymm1
+; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32_load1:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd (%rdi), %ymm1, %ymm0
+; AVX512-NEXT:    vpmaddwd (%rdi), %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %a1 = load <16 x i16>, ptr %p1
   %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
@@ -146,12 +164,14 @@ define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_load1(<8 x i32> %a0, ptr %p1, <1
 define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_load2(<8 x i32> %a0, <16 x i16> %a1, ptr %p2) {
 ; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32_load2:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd (%rdi), %ymm1, %ymm0
+; AVX-NEXT:    vpmaddwd (%rdi), %ymm1, %ymm1
+; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32_load2:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd (%rdi), %ymm1, %ymm0
+; AVX512-NEXT:    vpmaddwd (%rdi), %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %a2 = load <16 x i16>, ptr %p2
   %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
@@ -162,12 +182,14 @@ define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_load2(<8 x i32> %a0, <16 x i16>
 define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_commute_load1(<8 x i32> %a0, ptr %p1, <16 x i16> %a2) {
 ; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32_commute_load1:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd (%rdi), %ymm1, %ymm0
+; AVX-NEXT:    vpmaddwd (%rdi), %ymm1, %ymm1
+; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32_commute_load1:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd (%rdi), %ymm1, %ymm0
+; AVX512-NEXT:    vpmaddwd (%rdi), %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %a1 = load <16 x i16>, ptr %p1
   %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
@@ -178,12 +200,14 @@ define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_commute_load1(<8 x i32> %a0, ptr
 define <8 x i32> @test_pmaddwd_v16i16_add_v8i32_commute_load2(<8 x i32> %a0, <16 x i16> %a1, ptr %p2) {
 ; AVX-LABEL: test_pmaddwd_v16i16_add_v8i32_commute_load2:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    {vex} vpdpwssd (%rdi), %ymm1, %ymm0
+; AVX-NEXT:    vpmaddwd (%rdi), %ymm1, %ymm1
+; AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; AVX512-LABEL: test_pmaddwd_v16i16_add_v8i32_commute_load2:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpdpwssd (%rdi), %ymm1, %ymm0
+; AVX512-NEXT:    vpmaddwd (%rdi), %ymm1, %ymm1
+; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
   %a2 = load <16 x i16>, ptr %p2
   %1 = call <8 x i32> @llvm.x86.avx2.pmadd.wd(<16 x i16> %a1, <16 x i16> %a2)
diff --git a/llvm/test/CodeGen/X86/early-ifcvt-remarks.ll b/llvm/test/CodeGen/X86/early-ifcvt-remarks.ll
index 4e07070fba0d1ed..bc0fce4f8cb8745 100644
--- a/llvm/test/CodeGen/X86/early-ifcvt-remarks.ll
+++ b/llvm/test/CodeGen/X86/early-ifcvt-remarks.ll
@@ -3,9 +3,9 @@ target triple = "x86_64-none-none"
 
 ; CHECK: remark: <unknown>:0:0: performing if-conversion on branch:
 ; CHECK-SAME: the condition adds {{[0-9]+}} cycle{{s?}} to the critical path,
-; CHECK-SAME: and the short leg adds another {{[0-9]+}} cycles{{s?}},
-; CHECK-SAME: and the long leg adds another {{[0-9]+}} cycles{{s?}},
-; CHECK-SAME: each staying under the threshold of {{[0-9]+}} cycles{{s?}}.
+; CHECK-SAME: and the short leg adds another {{[0-9]+}} cycle{{s?}},
+; CHECK-SAME: and the long leg adds another {{[0-9]+}} cycle{{s?}},
+; CHECK-SAME: each staying under the threshold of {{[0-9]+}} cycle{{s?}}.
 define i32 @mm1(i1 %pred, i32 %val) {
 entry:
   br i1 %pred, label %if.true, label %if.else
@@ -20,9 +20,9 @@ if.else:
 }
 
 ; CHECK: remark: <unknown>:0:0: did not if-convert branch:
-; CHECK-SAME: the condition would add {{[0-9]+}} cycles{{s?}} to the critical path,
-; CHECK-SAME: and the short leg would add another {{[0-9]+}} cycles{{s?}},
-; CHECK-SAME: and the long leg would add another {{[0-9]+}} cycles{{s?}} exceeding the limit of {{[0-9]+}} cycles{{s?}}.
+; CHECK-SAME: the condition would add {{[0-9]+}} cycle{{s?}} to the critical path,
+; CHECK-SAME: and the short leg would add another {{[0-9]+}} cycle{{s?}},
+; CHECK-SAME: and the long leg would add another {{[0-9]+}} cycle{{s?}} exceeding the limit of {{[0-9]+}} cycle{{s?}}.
 define i32 @mm2(i1 %pred, i32 %val, i32 %e1, i32 %e2, i32 %e3, i32 %e4, i32 %e5) {
 entry:
   br i1 %pred, label %if.true, label %if.else
@@ -39,11 +39,10 @@ if.else:
   ret i32 %res
 }
 
-; CHECK: did not if-convert branch:
-; CHECK-SAME: the resulting critical path ({{[0-9]+}} cycles{{s?}})
-; CHECK-SAME: would extend the shorter leg's critical path ({{[0-9]+}} cycle{{s?}})
-; CHECK-SAME: by more than the threshold of {{[0-9]+}} cycles{{s?}},
-; CHECK-SAME: which cannot be hidden by available ILP.
+; CHECK: remark: <unknown>:0:0: did not if-convert branch:
+; CHECK-SAME: the condition would add {{[0-9]+}} cycle{{s?}} to the critical path,
+; CHECK-SAME: and the short leg would add another {{[0-9]+}} cycle{{s?}},
+; CHECK-SAME: and the long leg would add another {{[0-9]+}} cycle{{s?}} exceeding the limit of {{[0-9]+}} cycle{{s?}}.
 define i32 @mm3(i1 %pred, i32 %val, i32 %e1, i128 %e2, i128 %e3, i128 %e4, i128 %e5) {
 entry:
   br i1 %pred, label %if.true, label %if.false
diff --git a/llvm/test/CodeGen/X86/gather-scatter-opaque-ptr-2.ll b/llvm/test/CodeGen/X86/gather-scatter-opaque-ptr-2.ll
index b88d9d171a5fdef..b6172c035386a8a 100644
--- a/llvm/test/CodeGen/X86/gather-scatter-opaque-ptr-2.ll
+++ b/llvm/test/CodeGen/X86/gather-scatter-opaque-ptr-2.ll
@@ -61,7 +61,7 @@ define <2 x float> @gather_v2f32_scale_3(ptr %result, <2 x i64> %idx, <2 x i1> %
 ; CHECK-LABEL: gather_v2f32_scale_3:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vpaddq %xmm0, %xmm0, %xmm2
-; CHECK-NEXT:    vpaddq %xmm0, %xmm2, %xmm2
+; CHECK-NEXT:    vpaddq %xmm2, %xmm0, %xmm2
 ; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
 ; CHECK-NEXT:    vpslld $31, %xmm0, %xmm1
 ; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/gather-scatter-opaque-ptr.ll b/llvm/test/CodeGen/X86/gather-scatter-opaque-ptr.ll
index f989f36d8fa0a0e..ffe7cf0450fc3d2 100644
--- a/llvm/test/CodeGen/X86/gather-scatter-opaque-ptr.ll
+++ b/llvm/test/CodeGen/X86/gather-scatter-opaque-ptr.ll
@@ -65,7 +65,7 @@ define void @scatter_scale_3(ptr %result, <4 x i64> %idx, <4 x i1> %mask) {
 ; CHECK-NEXT:    vpslld $31, %xmm1, %xmm1
 ; CHECK-NEXT:    vpmovd2m %xmm1, %k1
 ; CHECK-NEXT:    vpaddq %ymm0, %ymm0, %ymm1
-; CHECK-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vscatterqpd %ymm1, (%rdi,%ymm0) {%k1}
 ; CHECK-NEXT:    vzeroupper
@@ -151,7 +151,7 @@ define <4 x double> @gather_scale_3(ptr %result, <4 x i64> %idx, <4 x i1> %mask)
 ; CHECK-NEXT:    vpslld $31, %xmm1, %xmm1
 ; CHECK-NEXT:    vpmovd2m %xmm1, %k1
 ; CHECK-NEXT:    vpaddq %ymm0, %ymm0, %ymm1
-; CHECK-NEXT:    vpaddq %ymm0, %ymm1, %ymm1
+; CHECK-NEXT:    vpaddq %ymm1, %ymm0, %ymm1
 ; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vgatherqpd (%rdi,%ymm1), %ymm0 {%k1}
 ; CHECK-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/hipe-cc.ll b/llvm/test/CodeGen/X86/hipe-cc.ll
index a7bcd17b232757e..d8a78eabaf66083 100644
--- a/llvm/test/CodeGen/X86/hipe-cc.ll
+++ b/llvm/test/CodeGen/X86/hipe-cc.ll
@@ -36,7 +36,7 @@ entry:
 define cc 11 {i32, i32, i32} @addfour(i32 %hp, i32 %p, i32 %x, i32 %y, i32 %z) nounwind {
 ; CHECK-LABEL: addfour:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addl %edx, %eax
+; CHECK-NEXT:    addl %edx, %ecx
 ; CHECK-NEXT:    addl %ecx, %eax
 ; CHECK-NEXT:    retl
 entry:
diff --git a/llvm/test/CodeGen/X86/hipe-cc64.ll b/llvm/test/CodeGen/X86/hipe-cc64.ll
index d8505641cd789ec..a0459b4e93331d2 100644
--- a/llvm/test/CodeGen/X86/hipe-cc64.ll
+++ b/llvm/test/CodeGen/X86/hipe-cc64.ll
@@ -40,9 +40,9 @@ entry:
 define cc 11 {i64, i64, i64} @addfour(i64 %hp, i64 %p, i64 %x, i64 %y, i64 %z, i64 %w) nounwind {
 ; CHECK-LABEL: addfour:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    addq %rsi, %rdx
-; CHECK-NEXT:    leaq (%rcx,%r8), %rax
-; CHECK-NEXT:    addq %rdx, %rax
+; CHECK-NEXT:    leaq (%rdx,%rcx), %rax
+; CHECK-NEXT:    addq %rsi, %rax
+; CHECK-NEXT:    addq %r8, %rax
 ; CHECK-NEXT:    retq
 entry:
   %0 = add i64 %x, %y
diff --git a/llvm/test/CodeGen/X86/imul.ll b/llvm/test/CodeGen/X86/imul.ll
index 9131688c4efcc11..a4fe7e5b85e5303 100644
--- a/llvm/test/CodeGen/X86/imul.ll
+++ b/llvm/test/CodeGen/X86/imul.ll
@@ -450,13 +450,12 @@ define i64 @test6(i64 %a) {
 ;
 ; X86-LABEL: test6:
 ; X86:       # %bb.0: # %entry
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    shll $5, %ecx
-; X86-NEXT:    addl %eax, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl $33, %eax
 ; X86-NEXT:    mull {{[0-9]+}}(%esp)
 ; X86-NEXT:    addl %ecx, %edx
+; X86-NEXT:    shll $5, %ecx
+; X86-NEXT:    addl %ecx, %edx
 ; X86-NEXT:    retl
 entry:
 	%tmp3 = mul i64 %a, 33
diff --git a/llvm/test/CodeGen/X86/lea-opt2.ll b/llvm/test/CodeGen/X86/lea-opt2.ll
index f7588577a3e9a18..6530efda26f1a97 100644
--- a/llvm/test/CodeGen/X86/lea-opt2.ll
+++ b/llvm/test/CodeGen/X86/lea-opt2.ll
@@ -35,13 +35,11 @@ entry:
 define i32 @test2(ptr %p, i32 %a, i32 %b, i32 %c) {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $edx killed $edx def $rdx
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    addl %edx, %ecx
+; CHECK-NEXT:    addl %esi, %ecx
 ; CHECK-NEXT:    movl %ecx, (%rdi)
 ; CHECK-NEXT:    subl %edx, %eax
-; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
 ; CHECK-NEXT:    retq
 entry:
   %0 = add i32 %a, %b
@@ -55,13 +53,11 @@ entry:
 define i32 @test3(ptr %p, i32 %a, i32 %b, i32 %c) {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $edx killed $edx def $rdx
 ; CHECK-NEXT:    movl %esi, %eax
-; CHECK-NEXT:    addl %eax, %ecx
 ; CHECK-NEXT:    addl %edx, %ecx
+; CHECK-NEXT:    addl %esi, %ecx
 ; CHECK-NEXT:    movl %ecx, (%rdi)
 ; CHECK-NEXT:    subl %edx, %eax
-; CHECK-NEXT:    # kill: def $eax killed $eax killed $rax
 ; CHECK-NEXT:    retq
 entry:
   %0 = add i32 %a, %b
diff --git a/llvm/test/CodeGen/X86/machine-combiner-dbg.mir b/llvm/test/CodeGen/X86/machine-combiner-dbg.mir
index 6817d7e0ef00a70..5c93d7aedf31eae 100644
--- a/llvm/test/CodeGen/X86/machine-combiner-dbg.mir
+++ b/llvm/test/CodeGen/X86/machine-combiner-dbg.mir
@@ -1,3 +1,4 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
 # RUN: llc -mtriple=x86_64-gnu-linux -run-pass=machine-combiner %s -o - | FileCheck %s
 
 --- |
@@ -35,6 +36,19 @@ body:             |
   bb.0 (%ir-block.0):
     liveins: $xmm0, $xmm1, $xmm2, $xmm3
 
+    ; CHECK-LABEL: name: reassoc_me
+    ; CHECK: liveins: $xmm0, $xmm1, $xmm2, $xmm3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:fr32 = COPY $xmm3
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm2
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:fr32 = COPY $xmm1
+    ; CHECK-NEXT: [[COPY3:%[0-9]+]]:fr32 = COPY $xmm0
+    ; CHECK-NEXT: [[ADDSSrr:%[0-9]+]]:fr32 = nsz reassoc nofpexcept ADDSSrr [[COPY2]], [[COPY1]], implicit $mxcsr, debug-location !10
+    ; CHECK-NEXT: [[ADDSSrr1:%[0-9]+]]:fr32 = nsz reassoc nofpexcept ADDSSrr [[COPY3]], [[COPY]], implicit $mxcsr, debug-location !10
+    ; CHECK-NEXT: [[ADDSSrr2:%[0-9]+]]:fr32 = nsz reassoc nofpexcept ADDSSrr killed [[ADDSSrr1]], killed [[ADDSSrr]], implicit $mxcsr, debug-instr-number 1, debug-location !10
+    ; CHECK-NEXT: DBG_INSTR_REF !9, !DIExpression(DW_OP_LLVM_arg, 0), dbg-instr-ref(1, 0), debug-location !10
+    ; CHECK-NEXT: $xmm0 = COPY [[ADDSSrr2]], debug-location !10
+    ; CHECK-NEXT: RET 0, $xmm0, debug-location !10
     %3:fr32 = COPY $xmm3
     %2:fr32 = COPY $xmm2
     %1:fr32 = COPY $xmm1
@@ -46,13 +60,4 @@ body:             |
     $xmm0 = COPY %6, debug-location !10
     RET 0, $xmm0, debug-location !10
 
-  ; CHECK: ![[VAR:.*]] = !DILocalVariable(name: "temp3"
-  ; CHECK:      %[[arg3:.*]]:fr32 = COPY $xmm3
-  ; CHECK-NEXT: %[[arg2:.*]]:fr32 = COPY $xmm2
-  ; CHECK-NEXT: %[[arg1:.*]]:fr32 = COPY $xmm1
-  ; CHECK-NEXT: %[[arg0:.*]]:fr32 = COPY $xmm0
-  ; CHECK-NEXT: %[[add1:.*]]:fr32 = {{.*}} ADDSSrr %[[arg0]], %[[arg1]]
-  ; CHECK-NEXT: %[[add2:.*]]:fr32 = {{.*}} ADDSSrr %[[arg2]], %[[arg3]]
-  ; CHECK-NEXT:                            ADDSSrr %[[add1]], killed %[[add2]], {{.*}} debug-instr-number 1
-  ; CHECK-NEXT: DBG_INSTR_REF ![[VAR]], !DIExpression(DW_OP_LLVM_arg, 0), dbg-instr-ref(1, 0)
 ...
diff --git a/llvm/test/CodeGen/X86/machine-trace-metrics-entryBB-critpath.ll b/llvm/test/CodeGen/X86/machine-trace-metrics-entryBB-critpath.ll
index 6e15d0294fda442..673e2203e7caaf6 100644
--- a/llvm/test/CodeGen/X86/machine-trace-metrics-entryBB-critpath.ll
+++ b/llvm/test/CodeGen/X86/machine-trace-metrics-entryBB-critpath.ll
@@ -7,15 +7,15 @@
 ; MinInstr strategy is used. The behavior is demonstrated on the early if
 ; conversion pass.
 
-; CHECK: TBB: MinInstr trace %bb.0 --> %bb.0 --> %bb.2: 8 instrs. 30 cycles.
+; CHECK: TBB: MinInstr trace %bb.0 --> %bb.0 --> %bb.2: 8 instrs. 31 cycles.
 ; CHECK: %bb.0
 ; CHECK:     -> %bb.2
 
-; CHECK: FBB: MinInstr trace %bb.0 --> %bb.1 --> %bb.2: 10 instrs. 32 cycles.
+; CHECK: FBB: MinInstr trace %bb.0 --> %bb.1 --> %bb.2: 10 instrs. 33 cycles.
 ; CHECK: %bb.1 <- %bb.0
 ; CHECK:     -> %bb.2
 
-; CHECK: Resource length 10, minimal critical path 30
+; CHECK: Resource length 10, minimal critical path 31
 ; CHECK: If-converting
 
 define i32 @_Z3fooiidd(i32 %a, i32 %b, double %d, double %e) #0 {
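The one-cycle bumps in this test are the same shift viewed at the trace level: a depth at the head of the trace that used to read 0 now starts at that instruction's latency, apparently a single cycle here, so every figure derived from the traces grows by exactly one:

  TBB trace:             30 cycles -> 30 + 1 = 31
  FBB trace:             32 cycles -> 32 + 1 = 33
  minimal critical path: 30        -> 30 + 1 = 31

Both legs grow by the same amount, so the if-conversion decision is unaffected: the pass still prints "If-converting".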
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 1289eef7795dccb..c4e52ebb3f2881b 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -533,10 +533,10 @@ define <8 x i32> @test9(ptr %base, <8 x i64> %ind1, <8 x i32>%ind5) {
 ; SKX_SMALL:       # %bb.0: # %entry
 ; SKX_SMALL-NEXT:    vpbroadcastq %rdi, %zmm2
 ; SKX_SMALL-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
-; SKX_SMALL-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
 ; SKX_SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
 ; SKX_SMALL-NEXT:    vpmuldq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1
-; SKX_SMALL-NEXT:    vpaddq %zmm1, %zmm0, %zmm1
+; SKX_SMALL-NEXT:    vpaddq %zmm1, %zmm2, %zmm1
+; SKX_SMALL-NEXT:    vpaddq %zmm0, %zmm1, %zmm1
 ; SKX_SMALL-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_SMALL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; SKX_SMALL-NEXT:    vpgatherqd 72(,%zmm1), %ymm0 {%k1}
@@ -617,10 +617,10 @@ define <8 x i32> @test10(ptr %base, <8 x i64> %i1, <8 x i32>%ind5) {
 ; SKX_SMALL:       # %bb.0: # %entry
 ; SKX_SMALL-NEXT:    vpbroadcastq %rdi, %zmm2
 ; SKX_SMALL-NEXT:    vpmullq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
-; SKX_SMALL-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
 ; SKX_SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
 ; SKX_SMALL-NEXT:    vpmuldq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm1, %zmm1
-; SKX_SMALL-NEXT:    vpaddq %zmm1, %zmm0, %zmm1
+; SKX_SMALL-NEXT:    vpaddq %zmm1, %zmm2, %zmm1
+; SKX_SMALL-NEXT:    vpaddq %zmm0, %zmm1, %zmm1
 ; SKX_SMALL-NEXT:    kxnorw %k0, %k0, %k1
 ; SKX_SMALL-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; SKX_SMALL-NEXT:    vpgatherqd 72(,%zmm1), %ymm0 {%k1}
diff --git a/llvm/test/CodeGen/X86/mul-constant-i64.ll b/llvm/test/CodeGen/X86/mul-constant-i64.ll
index 27e32e5613bcf23..0ee95ca936ad354 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i64.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i64.ll
@@ -497,13 +497,12 @@ define i64 @test_mul_by_16(i64 %x) {
 define i64 @test_mul_by_17(i64 %x) {
 ; X86-LABEL: test_mul_by_17:
 ; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    shll $4, %ecx
-; X86-NEXT:    addl %eax, %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl $17, %eax
 ; X86-NEXT:    mull {{[0-9]+}}(%esp)
 ; X86-NEXT:    addl %ecx, %edx
+; X86-NEXT:    shll $4, %ecx
+; X86-NEXT:    addl %ecx, %edx
 ; X86-NEXT:    retl
 ;
 ; X86-NOOPT-LABEL: test_mul_by_17:
diff --git a/llvm/test/CodeGen/X86/no-split-size.ll b/llvm/test/CodeGen/X86/no-split-size.ll
index c1f93acd77dee27..9455461135e0429 100644
--- a/llvm/test/CodeGen/X86/no-split-size.ll
+++ b/llvm/test/CodeGen/X86/no-split-size.ll
@@ -24,21 +24,21 @@ define i64 @foo(ptr %ptr, i64 %p2, i64 %p3, i64 %p4, i64 %p5, i64 %p6) optsize {
 ; CHECK-NEXT:    .cfi_offset %r15, -16
 ; CHECK-NEXT:    movq %r9, %r14
 ; CHECK-NEXT:    movq %r8, %rbx
-; CHECK-NEXT:    movq %rcx, %r12
-; CHECK-NEXT:    movq %rdx, %r15
-; CHECK-NEXT:    movq %rsi, %r13
+; CHECK-NEXT:    movq %rcx, %r15
+; CHECK-NEXT:    movq %rdx, %r13
+; CHECK-NEXT:    movq %rsi, %r12
 ; CHECK-NEXT:    testq %rdi, %rdi
 ; CHECK-NEXT:    je .LBB0_1
 ; CHECK-NEXT:  # %bb.2: # %if.else
-; CHECK-NEXT:    testq %r13, %r13
-; CHECK-NEXT:    movq %r15, %rax
+; CHECK-NEXT:    testq %r12, %r12
+; CHECK-NEXT:    movq %r13, %rax
 ; CHECK-NEXT:    je .LBB0_3
 ; CHECK-NEXT:  .LBB0_4: # %if.end
-; CHECK-NEXT:    addq %r13, %rax
-; CHECK-NEXT:    addq %r12, %r15
-; CHECK-NEXT:    addq %rax, %r15
+; CHECK-NEXT:    addq %r13, %r12
+; CHECK-NEXT:    addq %r15, %rax
+; CHECK-NEXT:    addq %r12, %rax
 ; CHECK-NEXT:    addq %r14, %rbx
-; CHECK-NEXT:    addq %r15, %rbx
+; CHECK-NEXT:    addq %rax, %rbx
 ; CHECK-NEXT:    movq %rbx, %rax
 ; CHECK-NEXT:    popq %rbx
 ; CHECK-NEXT:    .cfi_def_cfa_offset 40
diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avx512vnni.ll b/llvm/test/CodeGen/X86/stack-folding-int-avx512vnni.ll
index cc529c3fad23c1e..fe13ed7fa3a1070 100644
--- a/llvm/test/CodeGen/X86/stack-folding-int-avx512vnni.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-int-avx512vnni.ll
@@ -11,7 +11,8 @@ define <16 x i32> @stack_fold_vpdpwssd(<16 x i32> %a0, <16 x i32> %a1, <16 x i32
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    vpdpwssd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
+; CHECK-NEXT:    vpmaddwd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
+; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %2 = call <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32> %a0, <16 x i32> %a1, <16 x i32> %a2)
@@ -25,7 +26,8 @@ define <16 x i32> @stack_fold_vpdpwssd_commuted(<16 x i32> %a0, <16 x i32> %a1,
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    vpdpwssd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm0 # 64-byte Folded Reload
+; CHECK-NEXT:    vpmaddwd {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 64-byte Folded Reload
+; CHECK-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
   %2 = call <16 x i32> @llvm.x86.avx512.vpdpwssd.512(<16 x i32> %a0, <16 x i32> %a2, <16 x i32> %a1)
diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avxvnni.ll b/llvm/test/CodeGen/X86/stack-folding-int-avxvnni.ll
index 4b0f63f9a6389ed..bef387915fa67bd 100644
--- a/llvm/test/CodeGen/X86/stack-folding-int-avxvnni.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-int-avxvnni.ll
@@ -20,7 +20,8 @@ define <4 x i32> @stack_fold_vpdpwssd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    {vex} vpdpwssd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    vpmaddwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
   %2 = call <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
@@ -34,7 +35,8 @@ define <4 x i32> @stack_fold_vpdpwssd_commuted(<4 x i32> %a0, <4 x i32> %a1, <4
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    {vex} vpdpwssd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT:    vpmaddwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
   %2 = call <4 x i32> @llvm.x86.avx512.vpdpwssd.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1)
@@ -48,7 +50,8 @@ define <8 x i32> @stack_fold_vpdpwssd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    {vex} vpdpwssd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
+; CHECK-NEXT:    vpmaddwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
   %2 = call <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2)
@@ -62,7 +65,8 @@ define <8 x i32> @stack_fold_vpdpwssd_256_commuted(<8 x i32> %a0, <8 x i32> %a1,
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    {vex} vpdpwssd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
+; CHECK-NEXT:    vpmaddwd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm1 # 32-byte Folded Reload
+; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
   %2 = call <8 x i32> @llvm.x86.avx512.vpdpwssd.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1)
diff --git a/llvm/test/CodeGen/X86/umul-with-overflow.ll b/llvm/test/CodeGen/X86/umul-with-overflow.ll
index ccabb360a990c95..46d2ec84797f818 100644
--- a/llvm/test/CodeGen/X86/umul-with-overflow.ll
+++ b/llvm/test/CodeGen/X86/umul-with-overflow.ll
@@ -93,7 +93,7 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    movl %ebx, %eax
 ; X86-NEXT:    mull %edi
 ; X86-NEXT:    movl %edx, %esi
-; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    mull %edi
 ; X86-NEXT:    movl %edx, %ecx
@@ -111,7 +111,7 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    mull %ebp
 ; X86-NEXT:    addl %esi, %eax
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
 ; X86-NEXT:    movzbl %cl, %eax
 ; X86-NEXT:    adcl %eax, %edx
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -142,9 +142,9 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    addl %edi, %esi
 ; X86-NEXT:    movzbl %cl, %eax
 ; X86-NEXT:    adcl %eax, %ebp
-; X86-NEXT:    addl (%esp), %esi # 4-byte Folded Reload
+; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
 ; X86-NEXT:    adcl %ebx, %ebp
-; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    adcl $0, (%esp) # 4-byte Folded Spill
 ; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -163,21 +163,21 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    addl %ebx, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    adcl %edi, %ecx
-; X86-NEXT:    setb (%esp) # 1-byte Folded Spill
+; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    mull {{[0-9]+}}(%esp)
 ; X86-NEXT:    movl %edx, %edi
 ; X86-NEXT:    movl %eax, %ebx
 ; X86-NEXT:    addl %ecx, %ebx
-; X86-NEXT:    movzbl (%esp), %eax # 1-byte Folded Reload
+; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
 ; X86-NEXT:    adcl %eax, %edi
 ; X86-NEXT:    addl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    adcl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    adcl $0, %ebx
 ; X86-NEXT:    adcl $0, %edi
-; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT:    addl (%esp), %ebx # 4-byte Folded Reload
 ; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
+; X86-NEXT:    setb (%esp) # 1-byte Folded Spill
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    mull %ecx
@@ -206,7 +206,7 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    adcl %edi, %ebp
 ; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
+; X86-NEXT:    movzbl (%esp), %eax # 1-byte Folded Reload
 ; X86-NEXT:    adcl %eax, %esi
 ; X86-NEXT:    movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    adcl $0, %edx
@@ -215,7 +215,7 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    movl %ebx, %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    mull %ecx
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
 ; X86-NEXT:    movl %edx, %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
 ; X86-NEXT:    movl %ebp, %eax
@@ -235,7 +235,7 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    movl %ebp, %eax
 ; X86-NEXT:    mull %ebx
 ; X86-NEXT:    addl %esi, %eax
-; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movzbl %cl, %eax
 ; X86-NEXT:    adcl %eax, %edx
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -262,20 +262,20 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    mull %ebx
-; X86-NEXT:    movl %edx, %ebx
-; X86-NEXT:    movl %eax, %ebp
-; X86-NEXT:    addl %edi, %ebp
+; X86-NEXT:    movl %edx, %ebp
+; X86-NEXT:    movl %eax, %ebx
+; X86-NEXT:    addl %edi, %ebx
 ; X86-NEXT:    movzbl %cl, %eax
-; X86-NEXT:    adcl %eax, %ebx
-; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
-; X86-NEXT:    adcl $0, (%esp) # 4-byte Folded Spill
+; X86-NEXT:    adcl %eax, %ebp
+; X86-NEXT:    addl (%esp), %ebx # 4-byte Folded Reload
+; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
 ; X86-NEXT:    mull %edi
 ; X86-NEXT:    movl %edx, %ecx
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
 ; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    mull %edi
 ; X86-NEXT:    movl %edx, %esi
@@ -297,140 +297,141 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    addl %ecx, %edi
 ; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
 ; X86-NEXT:    adcl %eax, %esi
-; X86-NEXT:    addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-NEXT:    adcl %ebx, %ebp
+; X86-NEXT:    addl %ebx, (%esp) # 4-byte Folded Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-NEXT:    adcl %ebp, %ebx
 ; X86-NEXT:    adcl $0, %edi
 ; X86-NEXT:    adcl $0, %esi
-; X86-NEXT:    addl (%esp), %edi # 4-byte Folded Reload
+; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
 ; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    imull %edx, %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT:    movl %ebx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl %ebp, %eax
 ; X86-NEXT:    mull %edx
 ; X86-NEXT:    addl %edx, %ecx
-; X86-NEXT:    imull {{[0-9]+}}(%esp), %ebx
-; X86-NEXT:    addl %ecx, %ebx
+; X86-NEXT:    imull {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    addl %ecx, %ebp
 ; X86-NEXT:    movl %eax, %edx
 ; X86-NEXT:    addl %edi, %edx
-; X86-NEXT:    adcl %esi, %ebx
+; X86-NEXT:    adcl %esi, %ebp
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-NEXT:    movl (%esp), %eax # 4-byte Reload
 ; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    adcl $0, %edx
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    adcl $0, %ebx
-; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    adcl $0, %ebp
+; X86-NEXT:    movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
+; X86-NEXT:    movl %ebx, %eax
 ; X86-NEXT:    mull %ecx
 ; X86-NEXT:    movl %edx, %edi
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    mull %ecx
 ; X86-NEXT:    movl %edx, %ecx
-; X86-NEXT:    movl %eax, %ebp
-; X86-NEXT:    addl %edi, %ebp
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    addl %edi, %esi
 ; X86-NEXT:    adcl $0, %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    movl %ebx, %eax
 ; X86-NEXT:    mull %edi
-; X86-NEXT:    movl %edi, %esi
-; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:    movl %edi, %ebx
+; X86-NEXT:    movl %edx, %ebp
 ; X86-NEXT:    movl %eax, %edi
-; X86-NEXT:    addl %ebp, %edi
-; X86-NEXT:    adcl %ecx, %ebx
+; X86-NEXT:    addl %esi, %edi
+; X86-NEXT:    adcl %ecx, %ebp
 ; X86-NEXT:    setb %cl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    mull %esi
-; X86-NEXT:    addl %ebx, %eax
+; X86-NEXT:    mull %ebx
+; X86-NEXT:    addl %ebp, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movzbl %cl, %eax
 ; X86-NEXT:    adcl %eax, %edx
 ; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl %ebp, %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    mull %ecx
-; X86-NEXT:    movl %edx, %ebp
+; X86-NEXT:    movl %edx, %esi
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    mull %ecx
 ; X86-NEXT:    movl %edx, %ecx
 ; X86-NEXT:    movl %eax, %ebx
-; X86-NEXT:    addl %ebp, %ebx
+; X86-NEXT:    addl %esi, %ebx
 ; X86-NEXT:    adcl $0, %ecx
-; X86-NEXT:    movl %esi, %eax
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    mull %esi
-; X86-NEXT:    movl %edx, %ebp
+; X86-NEXT:    movl %ebp, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    mull %ebp
+; X86-NEXT:    movl %edx, %esi
 ; X86-NEXT:    addl %ebx, %eax
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    adcl %ecx, %ebp
-; X86-NEXT:    setb (%esp) # 1-byte Folded Spill
+; X86-NEXT:    adcl %ecx, %esi
+; X86-NEXT:    setb %bl
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    mull %esi
-; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:    mull %ebp
+; X86-NEXT:    movl %edx, %ebp
 ; X86-NEXT:    movl %eax, %ecx
-; X86-NEXT:    addl %ebp, %ecx
-; X86-NEXT:    movzbl (%esp), %eax # 1-byte Folded Reload
-; X86-NEXT:    adcl %eax, %ebx
+; X86-NEXT:    addl %esi, %ecx
+; X86-NEXT:    movzbl %bl, %eax
+; X86-NEXT:    adcl %eax, %ebp
 ; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NEXT:    adcl %edi, %ebx
+; X86-NEXT:    adcl %edi, %ebp
 ; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
 ; X86-NEXT:    adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    mull %edi
-; X86-NEXT:    movl %edx, %esi
+; X86-NEXT:    mull %esi
+; X86-NEXT:    movl %edx, %ebx
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    mull %edi
-; X86-NEXT:    movl %edx, %edi
-; X86-NEXT:    movl %eax, %ebp
-; X86-NEXT:    addl %esi, %ebp
-; X86-NEXT:    adcl $0, %edi
+; X86-NEXT:    mull %esi
+; X86-NEXT:    movl %edx, %esi
+; X86-NEXT:    movl %eax, %edi
+; X86-NEXT:    addl %ebx, %edi
+; X86-NEXT:    adcl $0, %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    mull {{[0-9]+}}(%esp)
-; X86-NEXT:    movl %edx, %esi
-; X86-NEXT:    addl %ebp, %eax
-; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
-; X86-NEXT:    adcl %edi, %esi
+; X86-NEXT:    movl %edx, %ebx
+; X86-NEXT:    addl %edi, %eax
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT:    adcl %esi, %ebx
 ; X86-NEXT:    setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    mull {{[0-9]+}}(%esp)
 ; X86-NEXT:    movl %edx, %edi
-; X86-NEXT:    movl %eax, %ebp
-; X86-NEXT:    addl %esi, %ebp
+; X86-NEXT:    movl %eax, %esi
+; X86-NEXT:    addl %ebx, %esi
 ; X86-NEXT:    movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
 ; X86-NEXT:    adcl %eax, %edi
 ; X86-NEXT:    addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
-; X86-NEXT:    adcl %ebx, (%esp) # 4-byte Folded Spill
-; X86-NEXT:    adcl $0, %ebp
+; X86-NEXT:    adcl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
+; X86-NEXT:    adcl $0, %esi
 ; X86-NEXT:    adcl $0, %edi
-; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
 ; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    imull %edx, %ecx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    movl %ebp, %eax
 ; X86-NEXT:    mull %edx
 ; X86-NEXT:    movl %eax, %ebx
 ; X86-NEXT:    addl %edx, %ecx
-; X86-NEXT:    imull {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    addl %ecx, %esi
-; X86-NEXT:    addl %ebp, %ebx
-; X86-NEXT:    adcl %edi, %esi
+; X86-NEXT:    imull {{[0-9]+}}(%esp), %ebp
+; X86-NEXT:    addl %ecx, %ebp
+; X86-NEXT:    addl %esi, %ebx
+; X86-NEXT:    adcl %edi, %ebp
+; X86-NEXT:    movl %ebp, %esi
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -438,11 +439,11 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-NEXT:    adcl (%esp), %eax # 4-byte Folded Reload
 ; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl (%esp), %eax # 4-byte Reload
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
 ; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
+; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
 ; X86-NEXT:    adcl $0, %ebx
 ; X86-NEXT:    adcl $0, %esi
 ; X86-NEXT:    addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
@@ -459,30 +460,30 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    addl %ebp, %edi
 ; X86-NEXT:    addl %ebx, %ecx
 ; X86-NEXT:    adcl %esi, %edi
-; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    movl %edi, %eax
-; X86-NEXT:    imull {{[0-9]+}}(%esp), %edi
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT:    movl %edi, (%esp) # 4-byte Spill
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    imull %edx, %esi
+; X86-NEXT:    imull %edx, %edi
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    movl %esi, %eax
 ; X86-NEXT:    mull %edx
-; X86-NEXT:    movl %eax, %ebp
+; X86-NEXT:    movl %eax, %ebx
 ; X86-NEXT:    addl %edx, %edi
-; X86-NEXT:    addl %esi, %edi
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
-; X86-NEXT:    movl %ebx, %eax
-; X86-NEXT:    imull {{[0-9]+}}(%esp), %ebx
-; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
+; X86-NEXT:    imull {{[0-9]+}}(%esp), %esi
+; X86-NEXT:    addl %edi, %esi
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    imull %edx, %esi
+; X86-NEXT:    imull %edx, %ebp
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    movl %edi, %eax
 ; X86-NEXT:    mull %edx
-; X86-NEXT:    addl %edx, %ebx
-; X86-NEXT:    addl %esi, %ebx
-; X86-NEXT:    addl %ebp, %eax
-; X86-NEXT:    adcl %edi, %ebx
+; X86-NEXT:    addl %edx, %ebp
+; X86-NEXT:    imull {{[0-9]+}}(%esp), %edi
+; X86-NEXT:    addl %ebp, %edi
+; X86-NEXT:    addl %ebx, %eax
+; X86-NEXT:    adcl %esi, %edi
 ; X86-NEXT:    addl %ecx, %eax
-; X86-NEXT:    adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NEXT:    adcl (%esp), %edi # 4-byte Folded Reload
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
 ; X86-NEXT:    movl %edx, 4(%ecx)
@@ -498,11 +499,11 @@ define i300 @test4(i300 %a, i300 %b) nounwind {
 ; X86-NEXT:    movl %edx, 20(%ecx)
 ; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
 ; X86-NEXT:    movl %edx, 24(%ecx)
-; X86-NEXT:    movl (%esp), %edx # 4-byte Reload
+; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
 ; X86-NEXT:    movl %edx, 28(%ecx)
 ; X86-NEXT:    movl %eax, 32(%ecx)
-; X86-NEXT:    andl $4095, %ebx # imm = 0xFFF
-; X86-NEXT:    movw %bx, 36(%ecx)
+; X86-NEXT:    andl $4095, %edi # imm = 0xFFF
+; X86-NEXT:    movw %di, 36(%ecx)
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    addl $76, %esp
 ; X86-NEXT:    popl %esi
diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll
index 809735a88f20801..5085d36a3b87055 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll
@@ -1860,9 +1860,9 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535]
 ; SSE2-NEXT:    pandn %xmm1, %xmm2
 ; SSE2-NEXT:    pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT:    por %xmm1, %xmm2
 ; SSE2-NEXT:    paddw %xmm0, %xmm0
 ; SSE2-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT:    por %xmm1, %xmm0
 ; SSE2-NEXT:    por %xmm2, %xmm0
 ; SSE2-NEXT:    retq
 ;
@@ -1952,9 +1952,9 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535]
 ; X86-SSE2-NEXT:    pandn %xmm1, %xmm2
 ; X86-SSE2-NEXT:    pmulhuw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT:    por %xmm1, %xmm2
 ; X86-SSE2-NEXT:    paddw %xmm0, %xmm0
 ; X86-SSE2-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    por %xmm1, %xmm0
 ; X86-SSE2-NEXT:    por %xmm2, %xmm0
 ; X86-SSE2-NEXT:    retl
   %res = call <8 x i16> @llvm.fshr.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>)
diff --git a/llvm/test/MC/AArch64/local-bounds-single-trap.ll b/llvm/test/MC/AArch64/local-bounds-single-trap.ll
index 53a0e010537f096..ef69e9da8317605 100644
--- a/llvm/test/MC/AArch64/local-bounds-single-trap.ll
+++ b/llvm/test/MC/AArch64/local-bounds-single-trap.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -O3 -mtriple arm64-linux -filetype asm -o - %s | FileCheck %s -check-prefix CHECK-ASM
 ; What this test does is check that even with nomerge, the functions still get merged in
 ; compiled code as the ubsantrap call gets lowered to a single instruction: brk.
@@ -8,34 +9,41 @@
 
 ; Function Attrs: noinline nounwind uwtable
 define dso_local void @f8(i32 noundef %i, i32 noundef %k) #0 {
+; CHECK-ASM-LABEL: f8:
+; CHECK-ASM:       // %bb.0: // %entry
+; CHECK-ASM-NEXT:    sub sp, sp, #16
+; CHECK-ASM-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ASM-NEXT:    .cfi_remember_state
+; CHECK-ASM-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-ASM-NEXT:    sxtw x8, w0
+; CHECK-ASM-NEXT:    mov w9, #10 // =0xa
+; CHECK-ASM-NEXT:    stp w1, w0, [sp, #8]
+; CHECK-ASM-NEXT:    sub x9, x9, x8
+; CHECK-ASM-NEXT:    cmp x8, #10
+; CHECK-ASM-NEXT:    ccmp x9, #0, #4, ls
+; CHECK-ASM-NEXT:    b.eq .LBB0_4
+; CHECK-ASM-NEXT:  // %bb.1:
+; CHECK-ASM-NEXT:    ldrsw x9, [sp, #8]
+; CHECK-ASM-NEXT:    adrp x10, B
+; CHECK-ASM-NEXT:    add x10, x10, :lo12:B
+; CHECK-ASM-NEXT:    strb wzr, [x10, x8]
+; CHECK-ASM-NEXT:    cmp x9, #10
+; CHECK-ASM-NEXT:    b.hi .LBB0_4
+; CHECK-ASM-NEXT:  // %bb.2:
+; CHECK-ASM-NEXT:    mov w8, #10 // =0xa
+; CHECK-ASM-NEXT:    sub x8, x8, x9
+; CHECK-ASM-NEXT:    cbz x8, .LBB0_4
+; CHECK-ASM-NEXT:  // %bb.3:
+; CHECK-ASM-NEXT:    adrp x8, B2
+; CHECK-ASM-NEXT:    add x8, x8, :lo12:B2
+; CHECK-ASM-NEXT:    strb wzr, [x8, x9]
+; CHECK-ASM-NEXT:    add sp, sp, #16
+; CHECK-ASM-NEXT:    .cfi_def_cfa_offset 0
+; CHECK-ASM-NEXT:    ret
+; CHECK-ASM-NEXT:  .LBB0_4: // %trap3
+; CHECK-ASM-NEXT:    .cfi_restore_state
+; CHECK-ASM-NEXT:    brk #0x1
 entry:
-; CHECK-ASM: 	cmp	x8, #10
-; CHECK-ASM: 	b.hi	.LBB0_5
-; CHECK-ASM: // %bb.1:                               // %entry
-; CHECK-ASM: 	mov	w9, #10                         // =0xa
-; CHECK-ASM: 	sub	x9, x9, x8
-; CHECK-ASM: 	cbz	x9, .LBB0_5
-; CHECK-ASM: // %bb.2:
-; CHECK-ASM: 	ldrsw	x9, [sp, #8]
-; CHECK-ASM: 	adrp	x10, B
-; CHECK-ASM: 	add	x10, x10, :lo12:B
-; CHECK-ASM: 	strb	wzr, [x10, x8]
-; CHECK-ASM: 	cmp	x9, #10
-; CHECK-ASM: 	b.hi	.LBB0_5
-; CHECK-ASM: // %bb.3:
-; CHECK-ASM: 	mov	w8, #10                         // =0xa
-; CHECK-ASM: 	sub	x8, x8, x9
-; CHECK-ASM: 	cbz	x8, .LBB0_5
-; CHECK-ASM: // %bb.4:
-; CHECK-ASM: 	adrp	x8, B2
-; CHECK-ASM: 	add	x8, x8, :lo12:B2
-; CHECK-ASM: 	strb	wzr, [x8, x9]
-; CHECK-ASM: 	add	sp, sp, #16
-; CHECK-ASM: 	.cfi_def_cfa_offset 0
-; CHECK-ASM: 	ret
-; CHECK-ASM: .LBB0_5:                                // %trap3
-; CHECK-ASM: 	.cfi_restore_state
-; CHECK-ASM: 	brk	#0x1
   %i.addr = alloca i32, align 4
   %k.addr = alloca i32, align 4
   store i32 %i, ptr %i.addr, align 4


