[llvm] [InitUndef] Enable the InitUndef pass on AArch64 (PR #108353)
Nikita Popov via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 13 02:57:50 PDT 2024
https://github.com/nikic updated https://github.com/llvm/llvm-project/pull/108353
>From c18eebf74360d784ebe0bfd96fe660d24170f677 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 11 Sep 2024 11:55:49 +0200
Subject: [PATCH 1/4] [InitUndef] Enable the InitUndef pass on all targets
The InitUndef pass works around a register allocation issue where
undef operands can be allocated to the same register as early-clobber
result operands. This can violate ISA constraints that forbid certain
input and output registers from overlapping.
Originally this pass was implemented for RISCV, and then extended
to ARM in #77770. I've since removed the target-specific parts of
the pass in #106744 and #107885. This PR now enables the pass for
all targets.
The motivating case is test_stxp_undef in arm64-ldxr-stxr.ll for the
AArch64 target, where a stxp input and output were previously
allocated to the same register, resulting in a miscompile.
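To make the failure mode concrete, here is a minimal sketch adapted from
that test (the function signature and the intrinsic call are taken from the
diff below; the trailing ret is filled in for completeness):

    define dso_local i32 @test_stxp_undef(ptr %p, i64 %x) nounwind {
      ; The first data operand is undef, but the ISA still requires that the
      ; stxp status result register not overlap any of the input registers.
      %res = call i32 @llvm.aarch64.stxp(i64 undef, i64 %x, ptr %p)
      ret i32 %res
    }

Previously the allocator could pick x8 for the undef input while the status
result lived in w8 (stxp w8, x8, x1, [x0]); with InitUndef enabled the undef
input gets its own register (stxp w8, x9, x1, [x0]), as the updated CHECK
lines below show.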
---
.../include/llvm/CodeGen/TargetRegisterInfo.h | 13 -
.../llvm/CodeGen/TargetSubtargetInfo.h | 6 -
llvm/lib/CodeGen/InitUndef.cpp | 14 -
llvm/lib/Target/ARM/ARMBaseRegisterInfo.h | 14 -
llvm/lib/Target/ARM/ARMSubtarget.h | 7 -
llvm/lib/Target/RISCV/RISCVRegisterInfo.h | 5 -
llvm/lib/Target/RISCV/RISCVSubtarget.h | 2 -
llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll | 3 +-
.../AMDGPU/GlobalISel/mul-known-bits.i64.ll | 58 ++--
llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll | 10 +-
.../atomic_optimizations_global_pointer.ll | 24 +-
.../atomic_optimizations_local_pointer.ll | 23 +-
.../CodeGen/AMDGPU/integer-mad-patterns.ll | 322 +++++++++---------
llvm/test/CodeGen/AMDGPU/mad_64_32.ll | 22 +-
llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll | 22 +-
llvm/test/CodeGen/AMDGPU/wmma_modifiers.ll | 2 +-
...regalloc-last-chance-recoloring-failure.ll | 4 +-
17 files changed, 247 insertions(+), 304 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
index ebf06bc57948f2..1a2f31e199336a 100644
--- a/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetRegisterInfo.h
@@ -1203,19 +1203,6 @@ class TargetRegisterInfo : public MCRegisterInfo {
virtual bool isNonallocatableRegisterCalleeSave(MCRegister Reg) const {
return false;
}
-
- /// Returns if the architecture being targeted has the required Pseudo
- /// Instructions for initializing the register. By default this returns false,
- /// but where it is overriden for an architecture, the behaviour will be
- /// different. This can either be a check to ensure the Register Class is
- /// present, or to return true as an indication the architecture supports the
- /// pass. If using the method that does not check for the Register Class, it
- /// is imperative to ensure all required Pseudo Instructions are implemented,
- /// otherwise compilation may fail with an `Unexpected register class` error.
- virtual bool
- doesRegClassHavePseudoInitUndef(const TargetRegisterClass *RC) const {
- return false;
- }
};
//===----------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
index b4b018f080914a..707842f896b2fe 100644
--- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -332,12 +332,6 @@ class TargetSubtargetInfo : public MCSubtargetInfo {
/// Get the list of MacroFusion predicates.
virtual std::vector<MacroFusionPredTy> getMacroFusions() const { return {}; };
-
- /// supportsInitUndef is used to determine if an architecture supports
- /// the Init Undef Pass. By default, it is assumed that it will not support
- /// the pass, with architecture specific overrides providing the information
- /// where they are implemented.
- virtual bool supportsInitUndef() const { return false; }
};
} // end namespace llvm
diff --git a/llvm/lib/CodeGen/InitUndef.cpp b/llvm/lib/CodeGen/InitUndef.cpp
index d6f7c0d7cf0f5f..4081cb17b3cfdd 100644
--- a/llvm/lib/CodeGen/InitUndef.cpp
+++ b/llvm/lib/CodeGen/InitUndef.cpp
@@ -120,8 +120,6 @@ bool InitUndef::handleReg(MachineInstr *MI) {
continue;
if (!UseMO.getReg().isVirtual())
continue;
- if (!TRI->doesRegClassHavePseudoInitUndef(MRI->getRegClass(UseMO.getReg())))
- continue;
if (UseMO.isUndef() || findImplictDefMIFromReg(UseMO.getReg(), MRI))
Changed |= fixupIllOperand(MI, UseMO);
@@ -140,8 +138,6 @@ bool InitUndef::handleSubReg(MachineFunction &MF, MachineInstr &MI,
continue;
if (UseMO.isTied())
continue;
- if (!TRI->doesRegClassHavePseudoInitUndef(MRI->getRegClass(UseMO.getReg())))
- continue;
Register Reg = UseMO.getReg();
if (NewRegs.count(Reg))
@@ -245,16 +241,6 @@ bool InitUndef::processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB,
bool InitUndef::runOnMachineFunction(MachineFunction &MF) {
ST = &MF.getSubtarget();
-
- // supportsInitUndef is implemented to reflect if an architecture has support
- // for the InitUndef pass. Support comes from having the relevant Pseudo
- // instructions that can be used to initialize the register. The function
- // returns false by default so requires an implementation per architecture.
- // Support can be added by overriding the function in a way that best fits
- // the architecture.
- if (!ST->supportsInitUndef())
- return false;
-
MRI = &MF.getRegInfo();
TII = ST->getInstrInfo();
TRI = MRI->getTargetRegisterInfo();
diff --git a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
index 58b5e98fd30b14..926d702b4092a5 100644
--- a/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
+++ b/llvm/lib/Target/ARM/ARMBaseRegisterInfo.h
@@ -240,20 +240,6 @@ class ARMBaseRegisterInfo : public ARMGenRegisterInfo {
unsigned SrcSubReg) const override;
int getSEHRegNum(unsigned i) const { return getEncodingValue(i); }
-
- bool doesRegClassHavePseudoInitUndef(
- const TargetRegisterClass *RC) const override {
- (void)RC;
- // For the ARM Architecture we want to always return true because all
- // required PseudoInitUndef types have been added. If compilation fails due
- // to `Unexpected register class`, this is likely to be because the specific
- // register being used is not support by Init Undef and needs the Pseudo
- // Instruction adding to ARMInstrInfo.td. If this is implemented as a
- // conditional check, this could create a false positive where Init Undef is
- // not running, skipping the instruction and moving to the next. This could
- // lead to illegal instructions being generated by the register allocator.
- return true;
- }
};
} // end namespace llvm
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 00239ff94b7ba5..fa20f4b590bea5 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -209,13 +209,6 @@ class ARMSubtarget : public ARMGenSubtargetInfo {
return &InstrInfo->getRegisterInfo();
}
- /// The correct instructions have been implemented to initialize undef
- /// registers, therefore the ARM Architecture is supported by the Init Undef
- /// Pass. This will return true as the pass needs to be supported for all
- /// types of instructions. The pass will then perform more checks to ensure it
- /// should be applying the Pseudo Instructions.
- bool supportsInitUndef() const override { return true; }
-
const CallLowering *getCallLowering() const override;
InstructionSelector *getInstructionSelector() const override;
const LegalizerInfo *getLegalizerInfo() const override;
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
index 98a712af085399..cb0bb77d1fcbcb 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -130,11 +130,6 @@ struct RISCVRegisterInfo : public RISCVGenRegisterInfo {
const MachineFunction &MF, const VirtRegMap *VRM,
const LiveRegMatrix *Matrix) const override;
- bool doesRegClassHavePseudoInitUndef(
- const TargetRegisterClass *RC) const override {
- return isVRRegClass(RC);
- }
-
static bool isVRRegClass(const TargetRegisterClass *RC) {
return RISCVRI::isVRegClass(RC->TSFlags) &&
RISCVRI::getNF(RC->TSFlags) == 1;
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index ea54ff1df0b7cb..bf9ed3f3d71655 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -306,8 +306,6 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
unsigned getTailDupAggressiveThreshold() const {
return TuneInfo->TailDupAggressiveThreshold;
}
-
- bool supportsInitUndef() const override { return hasVInstructions(); }
};
} // End llvm namespace
diff --git a/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll b/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
index 1a60f8752dd571..4fb0c2775a7a7a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
@@ -354,11 +354,10 @@ define dso_local i32 @test_store_release_i64(i32, i64 %val, ptr %addr) {
}
; The stxp result cannot be allocated to the same register as the inputs.
-; FIXME: This is a miscompile.
define dso_local i32 @test_stxp_undef(ptr %p, i64 %x) nounwind {
; CHECK-LABEL: test_stxp_undef:
; CHECK: // %bb.0:
-; CHECK-NEXT: stxp w8, x8, x1, [x0]
+; CHECK-NEXT: stxp w8, x9, x1, [x0]
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
%res = call i32 @llvm.aarch64.stxp(i64 undef, i64 %x, ptr %p)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
index 489f46d1237a36..ae1b54f2aaf9c8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
@@ -33,11 +33,9 @@ define amdgpu_kernel void @v_mul_i64_no_zext(ptr addrspace(1) %out, ptr addrspac
; GFX11-NEXT: global_load_b64 v[2:3], v9, s[2:3]
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v0, v2, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, v0, v3, v[5:6]
-; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v1, v2, v[6:7]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v5, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v0, v3, v[5:6]
+; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v1, v2, v[7:8]
; GFX11-NEXT: global_store_b64 v9, v[4:5], s[2:3]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -85,13 +83,13 @@ define amdgpu_kernel void @v_mul_i64_zext_src1(ptr addrspace(1) %out, ptr addrsp
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_load_b64 v[0:1], v1, s[6:7]
-; GFX11-NEXT: global_load_b32 v5, v2, s[0:1]
+; GFX11-NEXT: global_load_b32 v7, v2, s[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, v5, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v1, v5, v[0:1]
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, v7, 0
; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v1, v7, v[3:4]
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
; GFX11-NEXT: global_store_b64 v0, v[2:3], s[4:5]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -138,14 +136,14 @@ define amdgpu_kernel void @v_mul_i64_zext_src0(ptr addrspace(1) %out, ptr addrsp
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v5, v1, s[6:7]
+; GFX11-NEXT: global_load_b32 v7, v1, s[6:7]
; GFX11-NEXT: global_load_b64 v[0:1], v0, s[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v5, v0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v5, v1, v[0:1]
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v7, v0, 0
; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v7, v1, v[3:4]
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
; GFX11-NEXT: global_store_b64 v0, v[2:3], s[4:5]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -241,14 +239,14 @@ define amdgpu_kernel void @v_mul_i64_masked_src0_hi(ptr addrspace(1) %out, ptr a
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_load_b32 v5, v0, s[6:7]
+; GFX11-NEXT: global_load_b32 v7, v0, s[6:7]
; GFX11-NEXT: global_load_b64 v[0:1], v0, s[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v5, v0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
-; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v5, v1, v[0:1]
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v7, v0, 0
; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v7, v1, v[3:4]
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
; GFX11-NEXT: global_store_b64 v0, v[2:3], s[4:5]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -436,16 +434,14 @@ define amdgpu_kernel void @v_mul_i64_partially_masked_src0(ptr addrspace(1) %out
; GFX11-NEXT: global_load_b64 v[0:1], v2, s[6:7]
; GFX11-NEXT: global_load_b64 v[2:3], v2, s[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(1)
-; GFX11-NEXT: v_and_b32_e32 v7, 0xfff00000, v0
+; GFX11-NEXT: v_and_b32_e32 v0, 0xfff00000, v0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v7, v2, 0
-; GFX11-NEXT: v_mov_b32_e32 v0, v5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v7, v3, v[0:1]
+; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v0, v2, 0
+; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v0, v3, v[5:6]
; GFX11-NEXT: v_and_b32_e32 v3, 0xf00f, v1
-; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[5:6]
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[7:8]
; GFX11-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v0, 0
; GFX11-NEXT: global_store_b64 v0, v[4:5], s[4:5]
; GFX11-NEXT: s_nop 0
@@ -568,10 +564,12 @@ define amdgpu_kernel void @v_mul64_masked_before_and_in_branch(ptr addrspace(1)
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v2, v4, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v2, v5, v[1:2]
-; GFX11-NEXT: ; implicit-def: $vgpr4_vgpr5
-; GFX11-NEXT: v_mov_b32_e32 v1, v3
+; GFX11-NEXT: v_mov_b32_e32 v3, v1
+; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, v2, v5, v[3:4]
; GFX11-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX11-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v1, v6
; GFX11-NEXT: .LBB10_2: ; %Flow
; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-NEXT: s_cbranch_execz .LBB10_4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
index 42f1bf84c04207..a7df4e6e75103a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
@@ -667,8 +667,8 @@ define i96 @v_mul_i96(i96 %num, i96 %den) {
; GFX11-NEXT: v_mul_lo_u32 v0, v6, v5
; GFX11-NEXT: v_mad_u64_u32 v[8:9], null, v7, v4, v[0:1]
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v6, v3, 0
-; GFX11-NEXT: v_mad_u64_u32 v[9:10], null, v2, v3, v[8:9]
-; GFX11-NEXT: v_mov_b32_e32 v2, v9
+; GFX11-NEXT: v_mad_u64_u32 v[10:11], null, v2, v3, v[8:9]
+; GFX11-NEXT: v_mov_b32_e32 v2, v10
; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v6, v4, v[1:2]
; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v7, v3, v[1:2]
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -2691,9 +2691,9 @@ define amdgpu_ps void @s_mul_u64_sext_with_vregs(ptr addrspace(1) %out, ptr addr
; GFX11-NEXT: global_load_b32 v4, v[2:3], off
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0x50, v4, 0
-; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v4
-; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, 0x50, v6, v[3:4]
-; GFX11-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-NEXT: v_ashrrev_i32_e32 v7, 31, v4
+; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, 0x50, v7, v[3:4]
+; GFX11-NEXT: v_mov_b32_e32 v3, v5
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index b17dfc7c3754a1..5262247c755b3f 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -550,7 +550,6 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164-NEXT: s_mov_b32 s7, 0x31016000
; GFX1164-NEXT: s_mov_b32 s6, -1
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_mad_u64_u32 v[1:2], null, s2, v0, s[0:1]
; GFX1164-NEXT: buffer_store_b32 v1, off, s[4:7], 0
; GFX1164-NEXT: s_nop 0
@@ -588,7 +587,6 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
; GFX1132-NEXT: s_mov_b32 s6, -1
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_mad_u64_u32 v[1:2], null, s0, v0, s[2:3]
; GFX1132-NEXT: buffer_store_b32 v1, off, s[4:7], 0
; GFX1132-NEXT: s_nop 0
@@ -2219,11 +2217,12 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164-NEXT: s_mov_b32 s7, 0x31016000
; GFX1164-NEXT: s_mov_b32 s6, -1
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mad_u64_u32 v[0:1], null, s0, v2, s[2:3]
-; GFX1164-NEXT: v_mad_u64_u32 v[3:4], null, s1, v2, v[1:2]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[3:4]
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v3
+; GFX1164-NEXT: v_mov_b32_e32 v1, v5
; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
; GFX1164-NEXT: s_nop 0
; GFX1164-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2265,11 +2264,12 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
; GFX1132-NEXT: s_mov_b32 s6, -1
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1132-NEXT: v_mad_u64_u32 v[0:1], null, s0, v2, s[2:3]
-; GFX1132-NEXT: v_mad_u64_u32 v[3:4], null, s1, v2, v[1:2]
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_mov_b32_e32 v3, v1
+; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[3:4]
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v3
+; GFX1132-NEXT: v_mov_b32_e32 v1, v5
; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
; GFX1132-NEXT: s_nop 0
; GFX1132-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5918,11 +5918,11 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_mov_b32 s7, 0x31016000
; GFX1164-NEXT: s_mov_b32 s6, -1
; GFX1164-NEXT: s_waitcnt_depctr 0xfff
-; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[4:5]
+; GFX1164-NEXT: v_mad_u64_u32 v[6:7], null, s1, v2, v[4:5]
; GFX1164-NEXT: v_readfirstlane_b32 s1, v1
; GFX1164-NEXT: v_sub_co_u32 v0, vcc, s0, v3
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v5
+; GFX1164-NEXT: v_mov_b32_e32 v1, v6
; GFX1164-NEXT: v_sub_co_ci_u32_e32 v1, vcc, s1, v1, vcc
; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
; GFX1164-NEXT: s_nop 0
@@ -5966,11 +5966,11 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
; GFX1132-NEXT: s_mov_b32 s6, -1
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[4:5]
+; GFX1132-NEXT: v_mad_u64_u32 v[6:7], null, s1, v2, v[4:5]
; GFX1132-NEXT: v_readfirstlane_b32 s1, v1
; GFX1132-NEXT: v_sub_co_u32 v0, vcc_lo, s0, v3
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v5
+; GFX1132-NEXT: v_mov_b32_e32 v1, v6
; GFX1132-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
; GFX1132-NEXT: s_nop 0
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
index 988bc8eec6e517..6a6cf4837d1fef 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
@@ -450,7 +450,6 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, i32 %additive)
; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
; GFX1132-NEXT: s_mov_b32 s6, -1
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_mad_u64_u32 v[1:2], null, s0, v0, s[2:3]
; GFX1132-NEXT: buffer_store_b32 v1, off, s[4:7], 0
; GFX1132-NEXT: s_nop 0
@@ -1939,10 +1938,11 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, i64 %additive)
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mad_u64_u32 v[0:1], null, s2, v2, s[4:5]
; GFX1164-NEXT: s_mov_b32 s2, -1
-; GFX1164-NEXT: v_mad_u64_u32 v[3:4], null, s3, v2, v[1:2]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[3:4]
; GFX1164-NEXT: s_mov_b32 s3, 0x31016000
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v3
+; GFX1164-NEXT: v_mov_b32_e32 v1, v5
; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
; GFX1164-NEXT: s_nop 0
; GFX1164-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1979,10 +1979,11 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, i64 %additive)
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: v_mad_u64_u32 v[0:1], null, s2, v2, s[4:5]
; GFX1132-NEXT: s_mov_b32 s2, -1
-; GFX1132-NEXT: v_mad_u64_u32 v[3:4], null, s3, v2, v[1:2]
+; GFX1132-NEXT: v_mov_b32_e32 v3, v1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[3:4]
; GFX1132-NEXT: s_mov_b32 s3, 0x31016000
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v3
+; GFX1132-NEXT: v_mov_b32_e32 v1, v5
; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
; GFX1132-NEXT: s_nop 0
; GFX1132-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5468,12 +5469,12 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, i64 %subitive)
; GFX1164-NEXT: v_readfirstlane_b32 s2, v0
; GFX1164-NEXT: v_readfirstlane_b32 s4, v1
; GFX1164-NEXT: s_waitcnt_depctr 0xfff
-; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[4:5]
+; GFX1164-NEXT: v_mad_u64_u32 v[6:7], null, s3, v2, v[4:5]
; GFX1164-NEXT: v_sub_co_u32 v0, vcc, s2, v3
; GFX1164-NEXT: s_mov_b32 s3, 0x31016000
; GFX1164-NEXT: s_mov_b32 s2, -1
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v5
+; GFX1164-NEXT: v_mov_b32_e32 v1, v6
; GFX1164-NEXT: v_sub_co_ci_u32_e32 v1, vcc, s4, v1, vcc
; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
; GFX1164-NEXT: s_nop 0
@@ -5510,12 +5511,12 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, i64 %subitive)
; GFX1132-NEXT: v_readfirstlane_b32 s2, v0
; GFX1132-NEXT: v_readfirstlane_b32 s4, v1
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[4:5]
+; GFX1132-NEXT: v_mad_u64_u32 v[6:7], null, s3, v2, v[4:5]
; GFX1132-NEXT: v_sub_co_u32 v0, vcc_lo, s2, v3
; GFX1132-NEXT: s_mov_b32 s3, 0x31016000
; GFX1132-NEXT: s_mov_b32 s2, -1
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v5
+; GFX1132-NEXT: v_mov_b32_e32 v1, v6
; GFX1132-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, s4, v1, vcc_lo
; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
; GFX1132-NEXT: s_nop 0
diff --git a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
index 9f093cc7b5abf2..a50354c2ca3d46 100644
--- a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
@@ -155,13 +155,13 @@ define i32 @clpeak_imad_pat_i32(i32 %x, i32 %y) {
; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v5, v0, v1
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v5, v0
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v0, v1
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v6, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v2, v5, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[3:4]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_i32:
@@ -2775,20 +2775,20 @@ define <2 x i32> @clpeak_imad_pat_v2i32(<2 x i32> %x, <2 x i32> %y) {
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v1
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v0, v2
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v7, v1, v3
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v8, v0, v2
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v10, v1, v3
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v6, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v7, v1
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v8, v0
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v10, v1
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v2
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v3, v1, v3
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[5:6], null, v3, v7, v[3:4]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v5, v3, v[5:6]
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v4, v1, v3
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[6:7], null, v2, v8, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[8:9], null, v4, v10, v[4:5]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v6, v2, v[6:7]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v8, v4, v[8:9]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_v2i32:
@@ -3165,28 +3165,28 @@ define <3 x i32> @clpeak_imad_pat_v3i32(<3 x i32> %x, <3 x i32> %y) {
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v1
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v2, 1, v2
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v7, v0, v3
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v8, v1, v4
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v10, v0, v3
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v12, v1, v4
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v9, v2, v5
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v7, v0
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v14, v2, v5
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v10, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v8, v1
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v6, v9, v2
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v12, v1
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v6, v14, v2
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v3
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v3, v1, v4
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v4, v6, v5
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[5:6], null, v2, v7, v[2:3]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[6:7], null, v3, v8, v[3:4]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[7:8], null, v4, v9, v[4:5]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v5, v2, v[5:6]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v6, v3, v[6:7]
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v4, v1, v4
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v6, v5
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[8:9], null, v2, v10, v[2:3]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[10:11], null, v4, v12, v[4:5]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[12:13], null, v6, v14, v[6:7]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v8, v2, v[8:9]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v10, v4, v[10:11]
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[2:3], null, v7, v4, v[7:8]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[2:3], null, v12, v6, v[12:13]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_v3i32:
@@ -3628,40 +3628,38 @@ define <4 x i32> @clpeak_imad_pat_v4i32(<4 x i32> %x, <4 x i32> %y) {
; GFX11-SDAG-LABEL: clpeak_imad_pat_v4i32:
; GFX11-SDAG: ; %bb.0: ; %entry
; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, 1, v3
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v1
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v2, 1, v2
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, 1, v3
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v13, v3, v7
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v15, v0, v4
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v16, v1, v5
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v17, v2, v6
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v8, v0, v4
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v9, v1, v5
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, v13, v3
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v15, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v10, v2, v6
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v11, v3, v7
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v16, v1
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v9, v17, v2
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v8, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v9, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v12, v10, v2
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v7, v3, v7
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v4
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v11, v3
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v3, v1, v5
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v4, v12, v6
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v5, v0, v7
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[6:7], null, v2, v8, v[2:3]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[7:8], null, v3, v9, v[3:4]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[8:9], null, v4, v10, v[4:5]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[9:10], null, v5, v11, v[5:6]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v6, v2, v[6:7]
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v11, v1, v5
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v9, v9, v6
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[5:6], null, v7, v13, v[7:8]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[13:14], null, v2, v15, v[2:3]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v11, v16, v[11:12]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[15:16], null, v9, v17, v[9:10]
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v7, v3, v[7:8]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[2:3], null, v8, v4, v[8:9]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v9, v5, v[9:10]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v13, v2, v[13:14]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v3, v11, v[3:4]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[2:3], null, v15, v9, v[15:16]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v5, v7, v[5:6]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_v4i32:
@@ -3963,13 +3961,13 @@ define i32 @clpeak_imad_pat_i24(i32 %x, i32 %y) {
; GFX11-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 24
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v5, v1, v0
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v1, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v5, v0
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v6, v0
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v2, v5, v[2:3]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[3:4]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_i24:
@@ -4199,13 +4197,13 @@ define i32 @clpeak_imad_pat_u24(i32 %x, i32 %y) {
; GFX11-SDAG-NEXT: v_and_b32_e32 v1, 0xffffff, v1
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v5, v1, v0
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v1, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v5, v0
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v6, v0
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v2, v5, v[2:3]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[3:4]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_u24:
@@ -5178,36 +5176,37 @@ define i64 @clpeak_imad_pat_i64(i64 %x, i64 %y) {
; GFX11-GISEL-LABEL: clpeak_imad_pat_i64:
; GFX11-GISEL: ; %bb.0: ; %entry
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-GISEL-NEXT: v_add_co_u32 v7, vcc_lo, v0, 1
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v1, vcc_lo
+; GFX11-GISEL-NEXT: v_add_co_u32 v8, vcc_lo, v0, 1
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0, v1, vcc_lo
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v7, v2, 0
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v7, v3, v[1:2]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v8, v2, v[4:5]
-; GFX11-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v0, v7
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v8, v2, 0
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v4, v1
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v0, v8
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v8, v3, v[4:5]
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v4, v2, 0
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v5, v8, vcc_lo
-; GFX11-GISEL-NEXT: v_add_co_u32 v11, vcc_lo, v0, 1
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, 0, v5, vcc_lo
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, v7
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[7:8], null, v4, v3, v[1:2]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[3:4], null, v6, v11, 0
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v10, v2, v[7:8]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, v4
-; GFX11-GISEL-NEXT: v_add_co_u32 v9, vcc_lo, v6, 1
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v6, v12, v[2:3]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v9, v2, v[6:7]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v1, v2, 0
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v3, v9, 0
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, 0, v8, vcc_lo
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v8, v11, v[4:5]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v3, v10, v[1:2]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[1:2], null, v5, v9, v[6:7]
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v4, v9, vcc_lo
+; GFX11-GISEL-NEXT: v_add_co_u32 v11, vcc_lo, v0, 1
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v1, v3, v[6:7]
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, 0, v4, vcc_lo
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[3:4], null, v5, v11, 0
+; GFX11-GISEL-NEXT: v_add_co_u32 v13, vcc_lo, v5, 1
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v10, v2, v[8:9]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v7, v4
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v3, v13, 0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[9:10], null, v5, v12, v[7:8]
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v14, vcc_lo, 0, v6, vcc_lo
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v6, v11, v[9:10]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v3, v14, v[1:2]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[1:2], null, v4, v13, v[5:6]
; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX1200-SDAG-LABEL: clpeak_imad_pat_i64:
@@ -5998,61 +5997,66 @@ define <2 x i64> @clpeak_imad_pat_v2i64(<2 x i64> %x, <2 x i64> %y) {
; GFX11-GISEL-LABEL: clpeak_imad_pat_v2i64:
; GFX11-GISEL: ; %bb.0: ; %entry
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-GISEL-NEXT: v_add_co_u32 v13, vcc_lo, v0, 1
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v14, vcc_lo, 0, v1, vcc_lo
-; GFX11-GISEL-NEXT: v_add_co_u32 v15, vcc_lo, v2, 1
+; GFX11-GISEL-NEXT: v_add_co_u32 v14, vcc_lo, v0, 1
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v15, vcc_lo, 0, v1, vcc_lo
+; GFX11-GISEL-NEXT: v_add_co_u32 v16, vcc_lo, v2, 1
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v13, v4, 0
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v16, vcc_lo, 0, v3, vcc_lo
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[2:3], null, v15, v6, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v13, v5, v[1:2]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[9:10], null, v15, v7, v[3:4]
-; GFX11-GISEL-NEXT: v_add_co_u32 v3, vcc_lo, v0, v13
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[10:11], null, v14, v4, v[8:9]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[11:12], null, v16, v6, v[9:10]
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v17, vcc_lo, v10, v14, vcc_lo
-; GFX11-GISEL-NEXT: v_add_co_u32 v18, vcc_lo, v2, v15
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v3, v4, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, v11, v16, vcc_lo
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[12:13], null, v18, v6, 0
-; GFX11-GISEL-NEXT: v_add_co_u32 v20, vcc_lo, v0, 1
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v21, vcc_lo, 0, v10, vcc_lo
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, v9
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[9:10], null, v8, v20, 0
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, v13
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[13:14], null, v3, v5, v[0:1]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, v10
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[14:15], null, v18, v7, v[1:2]
-; GFX11-GISEL-NEXT: v_add_co_u32 v18, vcc_lo, v2, 1
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, 0, v11, vcc_lo
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[15:16], null, v17, v4, v[13:14]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v14, v4, 0
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v17, vcc_lo, 0, v3, vcc_lo
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[2:3], null, v16, v6, 0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v8, v1
+; GFX11-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v0, v14
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v10, v3
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[12:13], null, v14, v5, v[8:9]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v16, v7, v[10:11]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[10:11], null, v15, v4, v[12:13]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[11:12], null, v17, v6, v[8:9]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v1, v4, 0
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v10, v15, vcc_lo
+; GFX11-GISEL-NEXT: v_add_co_u32 v20, vcc_lo, v2, v16
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v21, vcc_lo, v11, v17, vcc_lo
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v14, v9
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v12, v18, 0
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[10:11], null, v19, v6, v[14:15]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v8, v21, v[0:1]
-; GFX11-GISEL-NEXT: v_add_co_u32 v14, vcc_lo, v8, 1
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v16, vcc_lo, 0, v15, vcc_lo
-; GFX11-GISEL-NEXT: v_add_co_u32 v17, vcc_lo, v12, 1
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[12:13], null, v20, v6, 0
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[16:17], null, v1, v5, v[14:15]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[18:19], null, v20, v7, v[13:14]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[13:14], null, v3, v4, v[16:17]
+; GFX11-GISEL-NEXT: v_add_co_u32 v14, vcc_lo, v0, 1
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v21, v6, v[18:19]
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v10, vcc_lo
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v8, v14, 0
+; GFX11-GISEL-NEXT: v_add_co_u32 v15, vcc_lo, v2, 1
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, 0, v11, vcc_lo
+; GFX11-GISEL-NEXT: v_add_co_u32 v16, vcc_lo, v8, 1
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[7:8], null, v12, v22, v[5:6]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v9, v14, 0
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, v6
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v12, v15, 0
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v17, vcc_lo, 0, v13, vcc_lo
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[2:3], null, v4, v17, 0
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, 0, v10, vcc_lo
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[11:12], null, v15, v20, v[6:7]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[12:13], null, v10, v18, v[7:8]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[7:8], null, v9, v16, v[1:2]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v4, v19, v[3:4]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v11, v14, v[7:8]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[3:4], null, v12, v17, v[8:9]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[9:10], null, v8, v3, v[0:1]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v5, v16, 0
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, v7
+; GFX11-GISEL-NEXT: v_add_co_u32 v18, vcc_lo, v12, 1
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[7:8], null, v12, v11, v[2:3]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[11:12], null, v13, v14, v[9:10]
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v9, v1
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[2:3], null, v6, v18, 0
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, 0, v4, vcc_lo
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[12:13], null, v4, v15, v[7:8]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[7:8], null, v5, v17, v[9:10]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[9:10], null, v6, v19, v[3:4]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v11, v16, v[7:8]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[3:4], null, v12, v18, v[9:10]
; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, v5
; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
;
@@ -6913,13 +6917,13 @@ define i32 @clpeak_imad_pat_i32_x2(i32 %x, i32 %y) {
; GFX11-SDAG-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v2
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v5, v0, v1
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v5, v1
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v0, v1
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v6, v1
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v1, v0
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v2, v5, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[3:4]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_i32_x2:
@@ -7416,20 +7420,20 @@ define <2 x i32> @clpeak_imad_pat_v2i32_x2(<2 x i32> %x, <2 x i32> %y) {
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v2, 1, v4
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, 1, v5
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v0, v2
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v7, v1, v3
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v8, v0, v2
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v10, v1, v3
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v2, v6, v2
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, v7, v3
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v2, v8, v2
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, v10, v3
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v2, v0
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v3, v3, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[5:6], null, v3, v7, v[3:4]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v5, v3, v[5:6]
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v4, v3, v1
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[6:7], null, v2, v8, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[8:9], null, v4, v10, v[4:5]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v6, v2, v[6:7]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v8, v4, v[8:9]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_v2i32_x2:
diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
index 8eb0a46cc8b17f..7b5fed41f0789b 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -561,10 +561,10 @@ define i64 @mad_i64_i32_extops_i32_i64(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
; GFX1100-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v0
; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX1100-NEXT: v_mad_u64_u32 v[0:1], null, v5, v4, v[2:3]
-; GFX1100-NEXT: v_ashrrev_i32_e32 v5, 31, v5
-; GFX1100-NEXT: v_mov_b32_e32 v3, v1
+; GFX1100-NEXT: v_ashrrev_i32_e32 v3, 31, v5
+; GFX1100-NEXT: v_mov_b32_e32 v5, v1
; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v5, v4, v[3:4]
+; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v3, v4, v[5:6]
; GFX1100-NEXT: s_setpc_b64 s[30:31]
;
; GFX1150-LABEL: mad_i64_i32_extops_i32_i64:
@@ -686,11 +686,12 @@ define i64 @mad_u64_u32_bitops_lhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #
; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v0
; GFX1100-NEXT: v_mov_b32_e32 v6, v1
-; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1100-NEXT: v_mad_u64_u32 v[0:1], null, v2, v3, v[4:5]
-; GFX1100-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_and_b32 v5, 1, v6
-; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v5, v3, v[4:5]
+; GFX1100-NEXT: v_and_b32_e32 v6, 1, v6
+; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT: v_mov_b32_e32 v4, v1
+; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v6, v3, v[4:5]
; GFX1100-NEXT: s_setpc_b64 s[30:31]
;
; GFX1150-LABEL: mad_u64_u32_bitops_lhs_mask_small:
@@ -761,11 +762,12 @@ define i64 @mad_u64_u32_bitops_rhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #
; GFX1100: ; %bb.0:
; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT: v_mov_b32_e32 v6, v0
-; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX1100-NEXT: v_mad_u64_u32 v[0:1], null, v6, v2, v[4:5]
-; GFX1100-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_and_b32 v4, 1, v3
+; GFX1100-NEXT: v_and_b32_e32 v5, 1, v3
+; GFX1100-NEXT: v_mov_b32_e32 v3, v1
; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v6, v4, v[3:4]
+; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v6, v5, v[3:4]
; GFX1100-NEXT: s_setpc_b64 s[30:31]
;
; GFX1150-LABEL: mad_u64_u32_bitops_rhs_mask_small:
diff --git a/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll b/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
index f3b61bd4d4057e..687550b4497480 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
@@ -18,9 +18,9 @@ define amdgpu_ps float @mad_i32_vvv(i32 %a, i32 %b, i32 %c) {
;
; GFX11-LABEL: mad_i32_vvv:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mov_b32_e32 v3, v1
-; GFX11-NEXT: v_mov_b32_e32 v4, v0
-; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v4, v3, v[2:3]
+; GFX11-NEXT: v_mov_b32_e32 v4, v1
+; GFX11-NEXT: v_mov_b32_e32 v5, v0
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v5, v4, v[2:3]
; GFX11-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
@@ -127,8 +127,8 @@ define amdgpu_ps float @mad_i32_vcv(i32 %a, i32 %c) {
;
; GFX11-LABEL: mad_i32_vcv:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, 42, v[1:2]
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v0, 42, v[1:2]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
%mul = mul i32 %a, 42
%add = add i32 %mul, %c
@@ -194,8 +194,8 @@ define amdgpu_ps float @mad_i32_vsv(i32 %a, i32 inreg %b, i32 %c) {
;
; GFX11-LABEL: mad_i32_vsv:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, s0, v[1:2]
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v0, s0, v[1:2]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
@@ -216,8 +216,8 @@ define amdgpu_ps float @mad_i32_svv(i32 inreg %a, i32 %b, i32 %c) {
;
; GFX11-LABEL: mad_i32_svv:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, s0, v0, v[1:2]
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, s0, v0, v[1:2]
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
; GFX11-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
@@ -289,8 +289,8 @@ define amdgpu_ps float @mad_i32_ssv(i32 inreg %a, i32 inreg %b, i32 %c) {
;
; GFX11-LABEL: mad_i32_ssv:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, s0, s1, v[0:1]
-; GFX11-NEXT: v_mov_b32_e32 v0, v1
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, s0, s1, v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
; GFX11-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
diff --git a/llvm/test/CodeGen/AMDGPU/wmma_modifiers.ll b/llvm/test/CodeGen/AMDGPU/wmma_modifiers.ll
index 2efbd3277f2098..22ea80ead4c828 100644
--- a/llvm/test/CodeGen/AMDGPU/wmma_modifiers.ll
+++ b/llvm/test/CodeGen/AMDGPU/wmma_modifiers.ll
@@ -6,7 +6,7 @@ declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f32.v16f16(<16 x half>,
define amdgpu_cs void @xyz () {
; CHECK-LABEL: xyz:
-; CHECK: v_wmma_f32_16x16x16_f16 v[0:3], v[0:7], v[0:7], v[0:3]
+; CHECK: v_wmma_f32_16x16x16_f16 v[0:3], v[4:11], v[12:19], v[0:3]
.entry:
br label %loop
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 0f3fdf08696d61..6a0dbbe356a165 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -25,7 +25,7 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 16 * vlenb
; CHECK-NEXT: li a0, 55
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vloxseg2ei32.v v16, (a0), v8
+; CHECK-NEXT: vloxseg2ei32.v v16, (a1), v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -81,7 +81,7 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 16 * vlenb
; SUBREGLIVENESS-NEXT: li a0, 55
; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; SUBREGLIVENESS-NEXT: vloxseg2ei32.v v16, (a0), v8
+; SUBREGLIVENESS-NEXT: vloxseg2ei32.v v16, (a1), v8
; SUBREGLIVENESS-NEXT: csrr a0, vlenb
; SUBREGLIVENESS-NEXT: slli a0, a0, 3
; SUBREGLIVENESS-NEXT: add a0, sp, a0
From 481b8ea6a36771c215db0d69e778a267f679b388 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Thu, 12 Sep 2024 12:42:23 +0200
Subject: [PATCH 2/4] Restore hook, only enable on aarch64
---
.../llvm/CodeGen/TargetSubtargetInfo.h | 5 +-
llvm/lib/CodeGen/InitUndef.cpp | 6 +
llvm/lib/Target/AArch64/AArch64Subtarget.h | 3 +
llvm/lib/Target/ARM/ARMSubtarget.h | 4 +
llvm/lib/Target/RISCV/RISCVSubtarget.h | 4 +
.../AMDGPU/GlobalISel/mul-known-bits.i64.ll | 58 ++--
llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll | 10 +-
.../atomic_optimizations_global_pointer.ll | 24 +-
.../atomic_optimizations_local_pointer.ll | 23 +-
.../CodeGen/AMDGPU/integer-mad-patterns.ll | 322 +++++++++---------
llvm/test/CodeGen/AMDGPU/mad_64_32.ll | 22 +-
llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll | 22 +-
llvm/test/CodeGen/AMDGPU/wmma_modifiers.ll | 2 +-
13 files changed, 260 insertions(+), 245 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
index 707842f896b2fe..bc6aa0b8822e20 100644
--- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -332,8 +332,11 @@ class TargetSubtargetInfo : public MCSubtargetInfo {
/// Get the list of MacroFusion predicates.
virtual std::vector<MacroFusionPredTy> getMacroFusions() const { return {}; };
-};
+ /// Whether the target has instructions where an early-clobber result
+ /// operand cannot overlap with an undef input operand.
+ virtual bool requiresDisjointEarlyClobberAndUndef() const { return false; }
+};
} // end namespace llvm
#endif // LLVM_CODEGEN_TARGETSUBTARGETINFO_H
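
With the hook declared this way, a target that needs the workaround opts in by overriding it; the AArch64, ARM and RISC-V changes in this patch do exactly that. A minimal sketch of such an override (MyTargetSubtarget and MyTargetGenSubtargetInfo are placeholder names, not classes touched by this patch):

class MyTargetSubtarget : public MyTargetGenSubtargetInfo {
public:
  // Sketch only: return true when some instruction on the target forbids an
  // early-clobber result from sharing a register with an undef input, so
  // InitUndef will run and insert INIT_UNDEF for such inputs.
  bool requiresDisjointEarlyClobberAndUndef() const override { return true; }
};
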
diff --git a/llvm/lib/CodeGen/InitUndef.cpp b/llvm/lib/CodeGen/InitUndef.cpp
index 4081cb17b3cfdd..5c8aa8b62d3e3b 100644
--- a/llvm/lib/CodeGen/InitUndef.cpp
+++ b/llvm/lib/CodeGen/InitUndef.cpp
@@ -241,6 +241,12 @@ bool InitUndef::processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB,
bool InitUndef::runOnMachineFunction(MachineFunction &MF) {
ST = &MF.getSubtarget();
+
+ // The pass is only needed if early-clobber defs and undef ops cannot be
+ // allocated to the same register.
+ if (!ST->requiresDisjointEarlyClobberAndUndef())
+ return false;
+
MRI = &MF.getRegInfo();
TII = ST->getInstrInfo();
TRI = MRI->getTargetRegisterInfo();
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index accfb49c6fbe3a..d1276b2fcc7356 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -435,6 +435,9 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
return AddressCheckPSV.get();
}
+ /// Instructions like stxp cannot use the same register for input and output.
+ bool requiresDisjointEarlyClobberAndUndef() const override { return true; }
+
private:
/// Pseudo value representing memory load performed to check an address.
///
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index fa20f4b590bea5..323e9bff6d71b8 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -209,6 +209,10 @@ class ARMSubtarget : public ARMGenSubtargetInfo {
return &InstrInfo->getRegisterInfo();
}
+ /// Instructions like vhcadd cannot use the same register for input and
+ /// output.
+ bool requiresDisjointEarlyClobberAndUndef() const override { return true; }
+
const CallLowering *getCallLowering() const override;
InstructionSelector *getInstructionSelector() const override;
const LegalizerInfo *getLegalizerInfo() const override;
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index bf9ed3f3d71655..a19bf30277d0e3 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -306,6 +306,10 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
unsigned getTailDupAggressiveThreshold() const {
return TuneInfo->TailDupAggressiveThreshold;
}
+
+ bool requiresDisjointEarlyClobberAndUndef() const override {
+ return hasVInstructions();
+ }
};
} // End llvm namespace
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
index ae1b54f2aaf9c8..489f46d1237a36 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
@@ -33,9 +33,11 @@ define amdgpu_kernel void @v_mul_i64_no_zext(ptr addrspace(1) %out, ptr addrspac
; GFX11-NEXT: global_load_b64 v[2:3], v9, s[2:3]
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v0, v2, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v0, v3, v[5:6]
-; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v1, v2, v[7:8]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, v0, v3, v[5:6]
+; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v1, v2, v[6:7]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v5, v7
; GFX11-NEXT: global_store_b64 v9, v[4:5], s[2:3]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -83,13 +85,13 @@ define amdgpu_kernel void @v_mul_i64_zext_src1(ptr addrspace(1) %out, ptr addrsp
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_load_b64 v[0:1], v1, s[6:7]
-; GFX11-NEXT: global_load_b32 v7, v2, s[0:1]
+; GFX11-NEXT: global_load_b32 v5, v2, s[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, v7, 0
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, v5, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v1, v5, v[0:1]
; GFX11-NEXT: v_mov_b32_e32 v0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v1, v7, v[3:4]
-; GFX11-NEXT: v_mov_b32_e32 v3, v5
; GFX11-NEXT: global_store_b64 v0, v[2:3], s[4:5]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -136,14 +138,14 @@ define amdgpu_kernel void @v_mul_i64_zext_src0(ptr addrspace(1) %out, ptr addrsp
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: global_load_b32 v7, v1, s[6:7]
+; GFX11-NEXT: global_load_b32 v5, v1, s[6:7]
; GFX11-NEXT: global_load_b64 v[0:1], v0, s[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v7, v0, 0
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v5, v0, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v5, v1, v[0:1]
; GFX11-NEXT: v_mov_b32_e32 v0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v7, v1, v[3:4]
-; GFX11-NEXT: v_mov_b32_e32 v3, v5
; GFX11-NEXT: global_store_b64 v0, v[2:3], s[4:5]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -239,14 +241,14 @@ define amdgpu_kernel void @v_mul_i64_masked_src0_hi(ptr addrspace(1) %out, ptr a
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_load_b32 v7, v0, s[6:7]
+; GFX11-NEXT: global_load_b32 v5, v0, s[6:7]
; GFX11-NEXT: global_load_b64 v[0:1], v0, s[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v7, v0, 0
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v5, v0, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v5, v1, v[0:1]
; GFX11-NEXT: v_mov_b32_e32 v0, 0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v7, v1, v[3:4]
-; GFX11-NEXT: v_mov_b32_e32 v3, v5
; GFX11-NEXT: global_store_b64 v0, v[2:3], s[4:5]
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -434,14 +436,16 @@ define amdgpu_kernel void @v_mul_i64_partially_masked_src0(ptr addrspace(1) %out
; GFX11-NEXT: global_load_b64 v[0:1], v2, s[6:7]
; GFX11-NEXT: global_load_b64 v[2:3], v2, s[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(1)
-; GFX11-NEXT: v_and_b32_e32 v0, 0xfff00000, v0
+; GFX11-NEXT: v_and_b32_e32 v7, 0xfff00000, v0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v0, v2, 0
-; GFX11-NEXT: v_mad_u64_u32 v[7:8], null, v0, v3, v[5:6]
+; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, v7, v2, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, v5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v7, v3, v[0:1]
; GFX11-NEXT: v_and_b32_e32 v3, 0xf00f, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[7:8]
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[5:6]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_dual_mov_b32 v5, v0 :: v_dual_mov_b32 v0, 0
; GFX11-NEXT: global_store_b64 v0, v[4:5], s[4:5]
; GFX11-NEXT: s_nop 0
@@ -564,12 +568,10 @@ define amdgpu_kernel void @v_mul64_masked_before_and_in_branch(ptr addrspace(1)
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v2, v4, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v3, v1
-; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, v2, v5, v[3:4]
-; GFX11-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v2, v5, v[1:2]
; GFX11-NEXT: ; implicit-def: $vgpr4_vgpr5
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v1, v6
+; GFX11-NEXT: v_mov_b32_e32 v1, v3
+; GFX11-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX11-NEXT: .LBB10_2: ; %Flow
; GFX11-NEXT: s_and_not1_saveexec_b32 s0, s0
; GFX11-NEXT: s_cbranch_execz .LBB10_4
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
index a7df4e6e75103a..42f1bf84c04207 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
@@ -667,8 +667,8 @@ define i96 @v_mul_i96(i96 %num, i96 %den) {
; GFX11-NEXT: v_mul_lo_u32 v0, v6, v5
; GFX11-NEXT: v_mad_u64_u32 v[8:9], null, v7, v4, v[0:1]
; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v6, v3, 0
-; GFX11-NEXT: v_mad_u64_u32 v[10:11], null, v2, v3, v[8:9]
-; GFX11-NEXT: v_mov_b32_e32 v2, v10
+; GFX11-NEXT: v_mad_u64_u32 v[9:10], null, v2, v3, v[8:9]
+; GFX11-NEXT: v_mov_b32_e32 v2, v9
; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v6, v4, v[1:2]
; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, v7, v3, v[1:2]
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -2691,9 +2691,9 @@ define amdgpu_ps void @s_mul_u64_sext_with_vregs(ptr addrspace(1) %out, ptr addr
; GFX11-NEXT: global_load_b32 v4, v[2:3], off
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0x50, v4, 0
-; GFX11-NEXT: v_ashrrev_i32_e32 v7, 31, v4
-; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, 0x50, v7, v[3:4]
-; GFX11-NEXT: v_mov_b32_e32 v3, v5
+; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v4
+; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, 0x50, v6, v[3:4]
+; GFX11-NEXT: v_mov_b32_e32 v3, v4
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index 5262247c755b3f..b17dfc7c3754a1 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -550,6 +550,7 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164-NEXT: s_mov_b32 s7, 0x31016000
; GFX1164-NEXT: s_mov_b32 s6, -1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_mad_u64_u32 v[1:2], null, s2, v0, s[0:1]
; GFX1164-NEXT: buffer_store_b32 v1, off, s[4:7], 0
; GFX1164-NEXT: s_nop 0
@@ -587,6 +588,7 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
; GFX1132-NEXT: s_mov_b32 s6, -1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_mad_u64_u32 v[1:2], null, s0, v0, s[2:3]
; GFX1132-NEXT: buffer_store_b32 v1, off, s[4:7], 0
; GFX1132-NEXT: s_nop 0
@@ -2217,12 +2219,11 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
; GFX1164-NEXT: s_mov_b32 s7, 0x31016000
; GFX1164-NEXT: s_mov_b32 s6, -1
-; GFX1164-NEXT: v_mad_u64_u32 v[0:1], null, s0, v2, s[2:3]
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v3, v1
-; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[3:4]
+; GFX1164-NEXT: v_mad_u64_u32 v[0:1], null, s0, v2, s[2:3]
+; GFX1164-NEXT: v_mad_u64_u32 v[3:4], null, s1, v2, v[1:2]
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v5
+; GFX1164-NEXT: v_mov_b32_e32 v1, v3
; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
; GFX1164-NEXT: s_nop 0
; GFX1164-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2264,12 +2265,11 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
; GFX1132-NEXT: s_mov_b32 s6, -1
-; GFX1132-NEXT: v_mad_u64_u32 v[0:1], null, s0, v2, s[2:3]
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v3, v1
-; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[3:4]
+; GFX1132-NEXT: v_mad_u64_u32 v[0:1], null, s0, v2, s[2:3]
+; GFX1132-NEXT: v_mad_u64_u32 v[3:4], null, s1, v2, v[1:2]
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v5
+; GFX1132-NEXT: v_mov_b32_e32 v1, v3
; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
; GFX1132-NEXT: s_nop 0
; GFX1132-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5918,11 +5918,11 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1164-NEXT: s_mov_b32 s7, 0x31016000
; GFX1164-NEXT: s_mov_b32 s6, -1
; GFX1164-NEXT: s_waitcnt_depctr 0xfff
-; GFX1164-NEXT: v_mad_u64_u32 v[6:7], null, s1, v2, v[4:5]
+; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[4:5]
; GFX1164-NEXT: v_readfirstlane_b32 s1, v1
; GFX1164-NEXT: v_sub_co_u32 v0, vcc, s0, v3
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v6
+; GFX1164-NEXT: v_mov_b32_e32 v1, v5
; GFX1164-NEXT: v_sub_co_ci_u32_e32 v1, vcc, s1, v1, vcc
; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
; GFX1164-NEXT: s_nop 0
@@ -5966,11 +5966,11 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, ptr addrspace(
; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
; GFX1132-NEXT: s_mov_b32 s6, -1
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX1132-NEXT: v_mad_u64_u32 v[6:7], null, s1, v2, v[4:5]
+; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s1, v2, v[4:5]
; GFX1132-NEXT: v_readfirstlane_b32 s1, v1
; GFX1132-NEXT: v_sub_co_u32 v0, vcc_lo, s0, v3
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v6
+; GFX1132-NEXT: v_mov_b32_e32 v1, v5
; GFX1132-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
; GFX1132-NEXT: s_nop 0
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
index 6a6cf4837d1fef..988bc8eec6e517 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
@@ -450,6 +450,7 @@ define amdgpu_kernel void @add_i32_uniform(ptr addrspace(1) %out, i32 %additive)
; GFX1132-NEXT: s_mov_b32 s7, 0x31016000
; GFX1132-NEXT: s_mov_b32 s6, -1
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_mad_u64_u32 v[1:2], null, s0, v0, s[2:3]
; GFX1132-NEXT: buffer_store_b32 v1, off, s[4:7], 0
; GFX1132-NEXT: s_nop 0
@@ -1938,11 +1939,10 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, i64 %additive)
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mad_u64_u32 v[0:1], null, s2, v2, s[4:5]
; GFX1164-NEXT: s_mov_b32 s2, -1
-; GFX1164-NEXT: v_mov_b32_e32 v3, v1
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[3:4]
+; GFX1164-NEXT: v_mad_u64_u32 v[3:4], null, s3, v2, v[1:2]
; GFX1164-NEXT: s_mov_b32 s3, 0x31016000
-; GFX1164-NEXT: v_mov_b32_e32 v1, v5
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_mov_b32_e32 v1, v3
; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
; GFX1164-NEXT: s_nop 0
; GFX1164-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -1979,11 +1979,10 @@ define amdgpu_kernel void @add_i64_uniform(ptr addrspace(1) %out, i64 %additive)
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: v_mad_u64_u32 v[0:1], null, s2, v2, s[4:5]
; GFX1132-NEXT: s_mov_b32 s2, -1
-; GFX1132-NEXT: v_mov_b32_e32 v3, v1
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[3:4]
+; GFX1132-NEXT: v_mad_u64_u32 v[3:4], null, s3, v2, v[1:2]
; GFX1132-NEXT: s_mov_b32 s3, 0x31016000
-; GFX1132-NEXT: v_mov_b32_e32 v1, v5
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_mov_b32_e32 v1, v3
; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
; GFX1132-NEXT: s_nop 0
; GFX1132-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -5469,12 +5468,12 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, i64 %subitive)
; GFX1164-NEXT: v_readfirstlane_b32 s2, v0
; GFX1164-NEXT: v_readfirstlane_b32 s4, v1
; GFX1164-NEXT: s_waitcnt_depctr 0xfff
-; GFX1164-NEXT: v_mad_u64_u32 v[6:7], null, s3, v2, v[4:5]
+; GFX1164-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[4:5]
; GFX1164-NEXT: v_sub_co_u32 v0, vcc, s2, v3
; GFX1164-NEXT: s_mov_b32 s3, 0x31016000
; GFX1164-NEXT: s_mov_b32 s2, -1
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: v_mov_b32_e32 v1, v6
+; GFX1164-NEXT: v_mov_b32_e32 v1, v5
; GFX1164-NEXT: v_sub_co_ci_u32_e32 v1, vcc, s4, v1, vcc
; GFX1164-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
; GFX1164-NEXT: s_nop 0
@@ -5511,12 +5510,12 @@ define amdgpu_kernel void @sub_i64_uniform(ptr addrspace(1) %out, i64 %subitive)
; GFX1132-NEXT: v_readfirstlane_b32 s2, v0
; GFX1132-NEXT: v_readfirstlane_b32 s4, v1
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1132-NEXT: v_mad_u64_u32 v[6:7], null, s3, v2, v[4:5]
+; GFX1132-NEXT: v_mad_u64_u32 v[5:6], null, s3, v2, v[4:5]
; GFX1132-NEXT: v_sub_co_u32 v0, vcc_lo, s2, v3
; GFX1132-NEXT: s_mov_b32 s3, 0x31016000
; GFX1132-NEXT: s_mov_b32 s2, -1
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1132-NEXT: v_mov_b32_e32 v1, v6
+; GFX1132-NEXT: v_mov_b32_e32 v1, v5
; GFX1132-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, s4, v1, vcc_lo
; GFX1132-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
; GFX1132-NEXT: s_nop 0
diff --git a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
index a50354c2ca3d46..9f093cc7b5abf2 100644
--- a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
@@ -155,13 +155,13 @@ define i32 @clpeak_imad_pat_i32(i32 %x, i32 %y) {
; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v0, v1
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v6, v0
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v5, v0, v1
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v5, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v2, v5, v[2:3]
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[3:4]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_i32:
@@ -2775,20 +2775,20 @@ define <2 x i32> @clpeak_imad_pat_v2i32(<2 x i32> %x, <2 x i32> %y) {
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v1
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v8, v0, v2
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v10, v1, v3
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v0, v2
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v7, v1, v3
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v8, v0
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v10, v1
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v6, v0
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v7, v1
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v2
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v4, v1, v3
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[6:7], null, v2, v8, v[2:3]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[8:9], null, v4, v10, v[4:5]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v6, v2, v[6:7]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v8, v4, v[8:9]
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v3, v1, v3
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[5:6], null, v3, v7, v[3:4]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v5, v3, v[5:6]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_v2i32:
@@ -3165,28 +3165,28 @@ define <3 x i32> @clpeak_imad_pat_v3i32(<3 x i32> %x, <3 x i32> %y) {
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v1
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v2, 1, v2
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v10, v0, v3
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v12, v1, v4
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v7, v0, v3
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v8, v1, v4
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v14, v2, v5
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v10, v0
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v9, v2, v5
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v7, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v12, v1
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v6, v14, v2
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v8, v1
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v6, v9, v2
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v3
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v4, v1, v4
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v6, v5
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[8:9], null, v2, v10, v[2:3]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[10:11], null, v4, v12, v[4:5]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[12:13], null, v6, v14, v[6:7]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v8, v2, v[8:9]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v10, v4, v[10:11]
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v3, v1, v4
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v4, v6, v5
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[5:6], null, v2, v7, v[2:3]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[6:7], null, v3, v8, v[3:4]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[7:8], null, v4, v9, v[4:5]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v5, v2, v[5:6]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v6, v3, v[6:7]
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[2:3], null, v12, v6, v[12:13]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[2:3], null, v7, v4, v[7:8]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_v3i32:
@@ -3628,38 +3628,40 @@ define <4 x i32> @clpeak_imad_pat_v4i32(<4 x i32> %x, <4 x i32> %y) {
; GFX11-SDAG-LABEL: clpeak_imad_pat_v4i32:
; GFX11-SDAG: ; %bb.0: ; %entry
; GFX11-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, 1, v3
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v1
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v2, 1, v2
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v13, v3, v7
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v15, v0, v4
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v16, v1, v5
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v17, v2, v6
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, 1, v3
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, v13, v3
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v15, v0
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v8, v0, v4
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v9, v1, v5
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v16, v1
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v9, v17, v2
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v10, v2, v6
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v11, v3, v7
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v7, v3, v7
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v8, v0
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v9, v1
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v12, v10, v2
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v4
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v11, v3
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v11, v1, v5
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v9, v9, v6
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[5:6], null, v7, v13, v[7:8]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[13:14], null, v2, v15, v[2:3]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v11, v16, v[11:12]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[15:16], null, v9, v17, v[9:10]
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v3, v1, v5
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v4, v12, v6
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v13, v2, v[13:14]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v3, v11, v[3:4]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[2:3], null, v15, v9, v[15:16]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v5, v7, v[5:6]
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v5, v0, v7
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[6:7], null, v2, v8, v[2:3]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[7:8], null, v3, v9, v[3:4]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[8:9], null, v4, v10, v[4:5]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[9:10], null, v5, v11, v[5:6]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v6, v2, v[6:7]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v7, v3, v[7:8]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[2:3], null, v8, v4, v[8:9]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v9, v5, v[9:10]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_v4i32:
@@ -3961,13 +3963,13 @@ define i32 @clpeak_imad_pat_i24(i32 %x, i32 %y) {
; GFX11-SDAG-NEXT: v_bfe_i32 v1, v1, 0, 24
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v1, v0
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v5, v1, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v6, v0
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v5, v0
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v2, v5, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[3:4]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_i24:
@@ -4197,13 +4199,13 @@ define i32 @clpeak_imad_pat_u24(i32 %x, i32 %y) {
; GFX11-SDAG-NEXT: v_and_b32_e32 v1, 0xffffff, v1
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, 1, v0
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v1, v0
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v5, v1, v0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v6, v0
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v0, v5, v0
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v0, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v2, v5, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[3:4]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_u24:
@@ -5176,37 +5178,36 @@ define i64 @clpeak_imad_pat_i64(i64 %x, i64 %y) {
; GFX11-GISEL-LABEL: clpeak_imad_pat_i64:
; GFX11-GISEL: ; %bb.0: ; %entry
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-GISEL-NEXT: v_add_co_u32 v8, vcc_lo, v0, 1
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0, v1, vcc_lo
+; GFX11-GISEL-NEXT: v_add_co_u32 v7, vcc_lo, v0, 1
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v1, vcc_lo
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v8, v2, 0
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v4, v1
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v0, v8
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v8, v3, v[4:5]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v7, v2, 0
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v7, v3, v[1:2]
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v9, v2, v[6:7]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v1, v2, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v4, v9, vcc_lo
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v8, v2, v[4:5]
+; GFX11-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v0, v7
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v4, v2, 0
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, v5, v8, vcc_lo
; GFX11-GISEL-NEXT: v_add_co_u32 v11, vcc_lo, v0, 1
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v1, v3, v[6:7]
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, 0, v4, vcc_lo
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[3:4], null, v5, v11, 0
-; GFX11-GISEL-NEXT: v_add_co_u32 v13, vcc_lo, v5, 1
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v10, v2, v[8:9]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v7, v4
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v3, v13, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[9:10], null, v5, v12, v[7:8]
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v14, vcc_lo, 0, v6, vcc_lo
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v6, v11, v[9:10]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v3, v14, v[1:2]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[1:2], null, v4, v13, v[5:6]
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v12, vcc_lo, 0, v5, vcc_lo
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, v7
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[7:8], null, v4, v3, v[1:2]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[3:4], null, v6, v11, 0
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v10, v2, v[7:8]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-GISEL-NEXT: v_add_co_u32 v9, vcc_lo, v6, 1
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v6, v12, v[2:3]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v3, v9, 0
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v10, vcc_lo, 0, v8, vcc_lo
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v8, v11, v[4:5]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v3, v10, v[1:2]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[1:2], null, v5, v9, v[6:7]
; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX1200-SDAG-LABEL: clpeak_imad_pat_i64:
@@ -5997,66 +5998,61 @@ define <2 x i64> @clpeak_imad_pat_v2i64(<2 x i64> %x, <2 x i64> %y) {
; GFX11-GISEL-LABEL: clpeak_imad_pat_v2i64:
; GFX11-GISEL: ; %bb.0: ; %entry
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-GISEL-NEXT: v_add_co_u32 v14, vcc_lo, v0, 1
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v15, vcc_lo, 0, v1, vcc_lo
-; GFX11-GISEL-NEXT: v_add_co_u32 v16, vcc_lo, v2, 1
+; GFX11-GISEL-NEXT: v_add_co_u32 v13, vcc_lo, v0, 1
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v14, vcc_lo, 0, v1, vcc_lo
+; GFX11-GISEL-NEXT: v_add_co_u32 v15, vcc_lo, v2, 1
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v14, v4, 0
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v17, vcc_lo, 0, v3, vcc_lo
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[2:3], null, v16, v6, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v8, v1
-; GFX11-GISEL-NEXT: v_add_co_u32 v1, vcc_lo, v0, v14
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v10, v3
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[12:13], null, v14, v5, v[8:9]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v16, v7, v[10:11]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[10:11], null, v15, v4, v[12:13]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[11:12], null, v17, v6, v[8:9]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v1, v4, 0
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v10, v15, vcc_lo
-; GFX11-GISEL-NEXT: v_add_co_u32 v20, vcc_lo, v2, v16
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v21, vcc_lo, v11, v17, vcc_lo
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v14, v9
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v13, v4, 0
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v16, vcc_lo, 0, v3, vcc_lo
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[2:3], null, v15, v6, 0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v13, v5, v[1:2]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[9:10], null, v15, v7, v[3:4]
+; GFX11-GISEL-NEXT: v_add_co_u32 v3, vcc_lo, v0, v13
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[10:11], null, v14, v4, v[8:9]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[11:12], null, v16, v6, v[9:10]
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v17, vcc_lo, v10, v14, vcc_lo
+; GFX11-GISEL-NEXT: v_add_co_u32 v18, vcc_lo, v2, v15
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v3, v4, 0
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, v11, v16, vcc_lo
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[12:13], null, v18, v6, 0
+; GFX11-GISEL-NEXT: v_add_co_u32 v20, vcc_lo, v0, 1
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v21, vcc_lo, 0, v10, vcc_lo
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, v9
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[9:10], null, v8, v20, 0
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, v13
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[13:14], null, v3, v5, v[0:1]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, v10
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[14:15], null, v18, v7, v[1:2]
+; GFX11-GISEL-NEXT: v_add_co_u32 v18, vcc_lo, v2, 1
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v22, vcc_lo, 0, v11, vcc_lo
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[15:16], null, v17, v4, v[13:14]
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[12:13], null, v20, v6, 0
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[16:17], null, v1, v5, v[14:15]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[18:19], null, v20, v7, v[13:14]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[13:14], null, v3, v4, v[16:17]
-; GFX11-GISEL-NEXT: v_add_co_u32 v14, vcc_lo, v0, 1
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v21, v6, v[18:19]
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v10, vcc_lo
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v8, v14, 0
-; GFX11-GISEL-NEXT: v_add_co_u32 v15, vcc_lo, v2, 1
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, 0, v11, vcc_lo
-; GFX11-GISEL-NEXT: v_add_co_u32 v16, vcc_lo, v8, 1
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[4:5], null, v12, v18, 0
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[10:11], null, v19, v6, v[14:15]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v8, v21, v[0:1]
+; GFX11-GISEL-NEXT: v_add_co_u32 v14, vcc_lo, v8, 1
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v16, vcc_lo, 0, v15, vcc_lo
+; GFX11-GISEL-NEXT: v_add_co_u32 v17, vcc_lo, v12, 1
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, v6
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[6:7], null, v12, v15, 0
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v17, vcc_lo, 0, v13, vcc_lo
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[7:8], null, v12, v22, v[5:6]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v9, v14, 0
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[9:10], null, v8, v3, v[0:1]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[0:1], null, v5, v16, 0
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v2, v7
-; GFX11-GISEL-NEXT: v_add_co_u32 v18, vcc_lo, v12, 1
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[2:3], null, v4, v17, 0
+; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, 0, v10, vcc_lo
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[11:12], null, v15, v20, v[6:7]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[12:13], null, v10, v18, v[7:8]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[7:8], null, v9, v16, v[1:2]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[8:9], null, v4, v19, v[3:4]
+; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v11, v14, v[7:8]
+; GFX11-GISEL-NEXT: v_mad_u64_u32 v[3:4], null, v12, v17, v[8:9]
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[7:8], null, v12, v11, v[2:3]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[11:12], null, v13, v14, v[9:10]
-; GFX11-GISEL-NEXT: v_mov_b32_e32 v9, v1
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[2:3], null, v6, v18, 0
-; GFX11-GISEL-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, 0, v4, vcc_lo
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[12:13], null, v4, v15, v[7:8]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[7:8], null, v5, v17, v[9:10]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[9:10], null, v6, v19, v[3:4]
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[5:6], null, v11, v16, v[7:8]
-; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-GISEL-NEXT: v_mad_u64_u32 v[3:4], null, v12, v18, v[9:10]
; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, v5
; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
;
@@ -6917,13 +6913,13 @@ define i32 @clpeak_imad_pat_i32_x2(i32 %x, i32 %y) {
; GFX11-SDAG-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, 1, v2
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v0, v1
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v6, v1
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v5, v0, v1
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v1, v5, v1
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v1, v0
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[3:4], null, v2, v5, v[2:3]
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v3, v2, v[3:4]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_i32_x2:
@@ -7420,20 +7416,20 @@ define <2 x i32> @clpeak_imad_pat_v2i32_x2(<2 x i32> %x, <2 x i32> %y) {
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v2, 1, v4
; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, 1, v5
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v8, v0, v2
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v10, v1, v3
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v6, v0, v2
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v7, v1, v3
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v2, v8, v2
-; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, v10, v3
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v2, v6, v2
+; GFX11-SDAG-NEXT: v_add_nc_u32_e32 v3, v7, v3
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-SDAG-NEXT: v_mul_lo_u32 v2, v2, v0
-; GFX11-SDAG-NEXT: v_mul_lo_u32 v4, v3, v1
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[6:7], null, v2, v8, v[2:3]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[8:9], null, v4, v10, v[4:5]
-; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v6, v2, v[6:7]
-; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v8, v4, v[8:9]
+; GFX11-SDAG-NEXT: v_mul_lo_u32 v3, v3, v1
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[4:5], null, v2, v6, v[2:3]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[5:6], null, v3, v7, v[3:4]
+; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[0:1], null, v4, v2, v[4:5]
+; GFX11-SDAG-NEXT: v_mad_u64_u32 v[1:2], null, v5, v3, v[5:6]
; GFX11-SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-GISEL-LABEL: clpeak_imad_pat_v2i32_x2:
diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
index 7b5fed41f0789b..8eb0a46cc8b17f 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -561,10 +561,10 @@ define i64 @mad_i64_i32_extops_i32_i64(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
; GFX1100-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_mov_b32 v5, v0
; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX1100-NEXT: v_mad_u64_u32 v[0:1], null, v5, v4, v[2:3]
-; GFX1100-NEXT: v_ashrrev_i32_e32 v3, 31, v5
-; GFX1100-NEXT: v_mov_b32_e32 v5, v1
+; GFX1100-NEXT: v_ashrrev_i32_e32 v5, 31, v5
+; GFX1100-NEXT: v_mov_b32_e32 v3, v1
; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v3, v4, v[5:6]
+; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v5, v4, v[3:4]
; GFX1100-NEXT: s_setpc_b64 s[30:31]
;
; GFX1150-LABEL: mad_i64_i32_extops_i32_i64:
@@ -686,12 +686,11 @@ define i64 @mad_u64_u32_bitops_lhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #
; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v0
; GFX1100-NEXT: v_mov_b32_e32 v6, v1
-; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1100-NEXT: v_mad_u64_u32 v[0:1], null, v2, v3, v[4:5]
-; GFX1100-NEXT: v_and_b32_e32 v6, 1, v6
; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1100-NEXT: v_mov_b32_e32 v4, v1
-; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v6, v3, v[4:5]
+; GFX1100-NEXT: v_mad_u64_u32 v[0:1], null, v2, v3, v[4:5]
+; GFX1100-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_and_b32 v5, 1, v6
+; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v5, v3, v[4:5]
; GFX1100-NEXT: s_setpc_b64 s[30:31]
;
; GFX1150-LABEL: mad_u64_u32_bitops_lhs_mask_small:
@@ -762,12 +761,11 @@ define i64 @mad_u64_u32_bitops_rhs_mask_small(i64 %arg0, i64 %arg1, i64 %arg2) #
; GFX1100: ; %bb.0:
; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1100-NEXT: v_mov_b32_e32 v6, v0
-; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1100-NEXT: v_mad_u64_u32 v[0:1], null, v6, v2, v[4:5]
-; GFX1100-NEXT: v_and_b32_e32 v5, 1, v3
-; GFX1100-NEXT: v_mov_b32_e32 v3, v1
+; GFX1100-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_and_b32 v4, 1, v3
; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v6, v5, v[3:4]
+; GFX1100-NEXT: v_mad_u64_u32 v[1:2], null, v6, v4, v[3:4]
; GFX1100-NEXT: s_setpc_b64 s[30:31]
;
; GFX1150-LABEL: mad_u64_u32_bitops_rhs_mask_small:
diff --git a/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll b/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
index 687550b4497480..f3b61bd4d4057e 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_u64_u32.ll
@@ -18,9 +18,9 @@ define amdgpu_ps float @mad_i32_vvv(i32 %a, i32 %b, i32 %c) {
;
; GFX11-LABEL: mad_i32_vvv:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mov_b32_e32 v4, v1
-; GFX11-NEXT: v_mov_b32_e32 v5, v0
-; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v5, v4, v[2:3]
+; GFX11-NEXT: v_mov_b32_e32 v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v4, v0
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, v4, v3, v[2:3]
; GFX11-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
@@ -127,8 +127,8 @@ define amdgpu_ps float @mad_i32_vcv(i32 %a, i32 %c) {
;
; GFX11-LABEL: mad_i32_vcv:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v0, 42, v[1:2]
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, 42, v[1:2]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
; GFX11-NEXT: ; return to shader part epilog
%mul = mul i32 %a, 42
%add = add i32 %mul, %c
@@ -194,8 +194,8 @@ define amdgpu_ps float @mad_i32_vsv(i32 %a, i32 inreg %b, i32 %c) {
;
; GFX11-LABEL: mad_i32_vsv:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v0, s0, v[1:2]
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, v0, s0, v[1:2]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
; GFX11-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
@@ -216,8 +216,8 @@ define amdgpu_ps float @mad_i32_svv(i32 inreg %a, i32 %b, i32 %c) {
;
; GFX11-LABEL: mad_i32_svv:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, s0, v0, v[1:2]
-; GFX11-NEXT: v_mov_b32_e32 v0, v3
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, s0, v0, v[1:2]
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
; GFX11-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
@@ -289,8 +289,8 @@ define amdgpu_ps float @mad_i32_ssv(i32 inreg %a, i32 inreg %b, i32 %c) {
;
; GFX11-LABEL: mad_i32_ssv:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, s0, s1, v[0:1]
-; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: v_mad_u64_u32 v[1:2], null, s0, s1, v[0:1]
+; GFX11-NEXT: v_mov_b32_e32 v0, v1
; GFX11-NEXT: ; return to shader part epilog
%mul = mul i32 %a, %b
%add = add i32 %mul, %c
diff --git a/llvm/test/CodeGen/AMDGPU/wmma_modifiers.ll b/llvm/test/CodeGen/AMDGPU/wmma_modifiers.ll
index 22ea80ead4c828..2efbd3277f2098 100644
--- a/llvm/test/CodeGen/AMDGPU/wmma_modifiers.ll
+++ b/llvm/test/CodeGen/AMDGPU/wmma_modifiers.ll
@@ -6,7 +6,7 @@ declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f32.v16f16(<16 x half>,
define amdgpu_cs void @xyz () {
; CHECK-LABEL: xyz:
-; CHECK: v_wmma_f32_16x16x16_f16 v[0:3], v[4:11], v[12:19], v[0:3]
+; CHECK: v_wmma_f32_16x16x16_f16 v[0:3], v[0:7], v[0:7], v[0:3]
.entry:
br label %loop
From 4609c881dee555de282ef84b20073ec3ee067e74 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Fri, 13 Sep 2024 11:51:27 +0200
Subject: [PATCH 3/4] Enable by default, disable on AMDGPU
---
llvm/include/llvm/CodeGen/TargetSubtargetInfo.h | 5 ++++-
llvm/lib/Target/AArch64/AArch64Subtarget.h | 3 ---
llvm/lib/Target/AMDGPU/GCNSubtarget.h | 6 ++++++
llvm/lib/Target/AMDGPU/R600Subtarget.h | 6 ++++++
llvm/lib/Target/ARM/ARMSubtarget.h | 4 ----
llvm/lib/Target/RISCV/RISCVSubtarget.h | 4 ----
6 files changed, 16 insertions(+), 12 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
index bc6aa0b8822e20..bfaa6450779ae0 100644
--- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -335,7 +335,10 @@ class TargetSubtargetInfo : public MCSubtargetInfo {
/// Whether the target has instructions where an early-clobber result
/// operand cannot overlap with an undef input operand.
- virtual bool requiresDisjointEarlyClobberAndUndef() const { return false; }
+ virtual bool requiresDisjointEarlyClobberAndUndef() const {
+ // Conservatively assume such instructions exist by default.
+ return true;
+ }
};
} // end namespace llvm
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index d1276b2fcc7356..accfb49c6fbe3a 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -435,9 +435,6 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
return AddressCheckPSV.get();
}
- /// Instructions like stxp cannot use the same register for input and output.
- bool requiresDisjointEarlyClobberAndUndef() const override { return true; }
-
private:
/// Pseudo value representing memory load performed to check an address.
///
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 7b74eab96c5677..a4ae8a1be32258 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1587,6 +1587,12 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
// the nop.
return true;
}
+
+ bool requiresDisjointEarlyClobberAndUndef() const override {
+ // AMDGPU doesn't care if early-clobber and undef operands are allocated
+ // to the same register.
+ return false;
+ }
};
class GCNUserSGPRUsageInfo {
diff --git a/llvm/lib/Target/AMDGPU/R600Subtarget.h b/llvm/lib/Target/AMDGPU/R600Subtarget.h
index c3d002f29272de..7f0f9305e1fa6c 100644
--- a/llvm/lib/Target/AMDGPU/R600Subtarget.h
+++ b/llvm/lib/Target/AMDGPU/R600Subtarget.h
@@ -160,6 +160,12 @@ class R600Subtarget final : public R600GenSubtargetInfo,
unsigned getMinWavesPerEU() const override {
return AMDGPU::IsaInfo::getMinWavesPerEU(this);
}
+
+ bool requiresDisjointEarlyClobberAndUndef() const override {
+ // AMDGPU doesn't care if early-clobber and undef operands are allocated
+ // to the same register.
+ return false;
+ }
};
} // end namespace llvm
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 323e9bff6d71b8..fa20f4b590bea5 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -209,10 +209,6 @@ class ARMSubtarget : public ARMGenSubtargetInfo {
return &InstrInfo->getRegisterInfo();
}
- /// Instructions like vhcadd cannot use the same register for input and
- /// output.
- bool requiresDisjointEarlyClobberAndUndef() const override { return true; }
-
const CallLowering *getCallLowering() const override;
InstructionSelector *getInstructionSelector() const override;
const LegalizerInfo *getLegalizerInfo() const override;
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index a19bf30277d0e3..bf9ed3f3d71655 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -306,10 +306,6 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
unsigned getTailDupAggressiveThreshold() const {
return TuneInfo->TailDupAggressiveThreshold;
}
-
- bool requiresDisjointEarlyClobberAndUndef() const override {
- return hasVInstructions();
- }
};
} // End llvm namespace
>From 812d5e7e22d6c786ef3001beda8789657321b01f Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Fri, 13 Sep 2024 11:57:31 +0200
Subject: [PATCH 4/4] Add MIR test
---
llvm/test/CodeGen/AArch64/init-undef.mir | 44 ++++++++++++++++++++++++
1 file changed, 44 insertions(+)
create mode 100644 llvm/test/CodeGen/AArch64/init-undef.mir
diff --git a/llvm/test/CodeGen/AArch64/init-undef.mir b/llvm/test/CodeGen/AArch64/init-undef.mir
new file mode 100644
index 00000000000000..92e9cd4b60db75
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/init-undef.mir
@@ -0,0 +1,44 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=aarch64-- -run-pass=init-undef -o - %s | FileCheck %s
+
+--- |
+ define dso_local i32 @test_stxp_undef(ptr %p, i64 %x) #0 {
+ %res = call i32 @llvm.aarch64.stxp(i64 undef, i64 %x, ptr %p)
+ ret i32 %res
+ }
+
+...
+---
+name: test_stxp_undef
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gpr64common }
+ - { id: 1, class: gpr64 }
+ - { id: 2, class: gpr32 }
+ - { id: 3, class: gpr64 }
+liveins:
+ - { reg: '$x0', virtual-reg: '%0' }
+ - { reg: '$x1', virtual-reg: '%1' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: $x0, $x1
+
+ ; CHECK-LABEL: name: test_stxp_undef
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:gpr64 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INIT_UNDEF:%[0-9]+]]:gpr64 = INIT_UNDEF
+ ; CHECK-NEXT: early-clobber %2:gpr32 = STXPX killed [[INIT_UNDEF]], [[COPY]], [[COPY1]] :: (volatile store (s128) into %ir.p)
+ ; CHECK-NEXT: $w0 = COPY %2
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ %1:gpr64 = COPY $x1
+ %0:gpr64common = COPY $x0
+ %3:gpr64 = IMPLICIT_DEF
+ early-clobber %2:gpr32 = STXPX killed %3, %1, %0 :: (volatile store (s128) into %ir.p)
+ $w0 = COPY %2
+ RET_ReallyLR implicit $w0
+
+...
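
The new test's checks were generated with update_mir_test_checks.py, per the
NOTE line above. To reproduce them locally, the test's RUN line can be invoked
directly with the file path substituted for %s, e.g. (assuming an in-tree llc
build):

  llc -mtriple=aarch64-- -run-pass=init-undef -o - llvm/test/CodeGen/AArch64/init-undef.mir

which should print the MIR with the INIT_UNDEF pseudo inserted, matching the
CHECK lines above.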