[llvm-branch-commits] [llvm] [AMDGPU][GlobalISel] Add RegBankLegalize support for S64 G_MUL (PR #175889)
via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Jan 13 21:41:05 PST 2026
https://github.com/vangthao95 created https://github.com/llvm/llvm-project/pull/175889
Patch 4 of 4 to implement full G_MUL support in RegBankLegalize.
>From aefccacf4a129bf3e3fc961d75b6442f702622bc Mon Sep 17 00:00:00 2001
From: Vang Thao <vang.thao at amd.com>
Date: Tue, 13 Jan 2026 20:37:48 -0800
Subject: [PATCH] [AMDGPU][GlobalISel] Add RegBankLegalize support for S64
G_MUL
Patch 4 of 4 patches to implement full G_MUL support in regbanklegalize.
---
.../AMDGPU/AMDGPURegBankLegalizeHelper.cpp | 23 +
.../AMDGPU/AMDGPURegBankLegalizeHelper.h | 1 +
.../AMDGPU/AMDGPURegBankLegalizeRules.cpp | 6 +-
.../AMDGPU/AMDGPURegBankLegalizeRules.h | 1 +
llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll | 133 ++++--
.../AMDGPU/GlobalISel/regbankselect-mul.mir | 42 +-
.../CodeGen/AMDGPU/integer-mad-patterns.ll | 207 ++++-----
llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll | 398 +++++++++---------
8 files changed, 443 insertions(+), 368 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
index 1a8bd6d8de261..a60366e5382a6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
@@ -842,6 +842,27 @@ bool RegBankLegalizeHelper::lowerUniMAD64(MachineInstr &MI) {
return true;
}
+bool RegBankLegalizeHelper::lowerSplitTo32Mul(MachineInstr &MI) {
+ Register Dst = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(Dst);
+ assert(DstTy == S64);
+ auto Op1 = B.buildUnmerge({VgprRB_S32}, MI.getOperand(1).getReg());
+ auto Op2 = B.buildUnmerge({VgprRB_S32}, MI.getOperand(2).getReg());
+
+ // TODO: G_AMDGPU_MAD_* optimizations for G_MUL divergent S64 operation to
+ // match GlobalISel with old regbankselect.
+ auto Lo = B.buildMul({VgprRB_S32}, Op1.getReg(0), Op2.getReg(0));
+ auto Carry = B.buildUMulH({VgprRB_S32}, Op1.getReg(0), Op2.getReg(0));
+ auto MulLo0Hi1 = B.buildMul({VgprRB_S32}, Op1.getReg(0), Op2.getReg(1));
+ auto MulHi0Lo1 = B.buildMul({VgprRB_S32}, Op1.getReg(1), Op2.getReg(0));
+ auto Sum = B.buildAdd(VgprRB_S32, MulLo0Hi1, MulHi0Lo1);
+ auto Hi = B.buildAdd(VgprRB_S32, Sum, Carry);
+
+ B.buildMergeLikeInstr(Dst, {Lo, Hi});
+ MI.eraseFromParent();
+ return true;
+}
+
bool RegBankLegalizeHelper::lowerSplitTo32Select(MachineInstr &MI) {
Register Dst = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(Dst);
@@ -1006,6 +1027,8 @@ bool RegBankLegalizeHelper::lower(MachineInstr &MI,
}
case SplitTo32:
return lowerSplitTo32(MI);
+ case SplitTo32Mul:
+ return lowerSplitTo32Mul(MI);
case SplitTo32Select:
return lowerSplitTo32Select(MI);
case SplitTo32SExtInReg:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
index f92ed3de6cf27..86669ae6ff6c7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
@@ -127,6 +127,7 @@ class RegBankLegalizeHelper {
bool lowerS_BFE(MachineInstr &MI);
bool lowerUniMAD64(MachineInstr &MI);
bool lowerSplitTo32(MachineInstr &MI);
+ bool lowerSplitTo32Mul(MachineInstr &MI);
bool lowerSplitTo16(MachineInstr &MI);
bool lowerSplitTo32Select(MachineInstr &MI);
bool lowerSplitTo32SExtInReg(MachineInstr &MI);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
index 5a03f6b5463ad..40f298ba51352 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -487,13 +487,17 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
.Uni(S32, {{Sgpr32, Sgpr32Trunc}, {Sgpr32, Sgpr32, Sgpr32AExtBoolInReg}})
.Div(S32, {{Vgpr32, Vcc}, {Vgpr32, Vgpr32, Vcc}});
+ bool HasVecMulU64 = ST->hasVectorMulU64();
addRulesForGOpcs({G_MUL}, Standard)
.Uni(S32, {{Sgpr32}, {Sgpr32, Sgpr32}})
.Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}})
.Uni(S16, {{UniInVgprS16}, {Vgpr16, Vgpr16}})
.Div(S16, {{Vgpr16}, {Vgpr16, Vgpr16}})
+ .Uni(S64, {{SgprB64}, {SgprB64, SgprB64}})
.Uni(V2S16, {{UniInVgprV2S16}, {VgprV2S16}})
- .Div(V2S16, {{VgprV2S16}, {VgprV2S16}});
+ .Div(V2S16, {{VgprV2S16}, {VgprV2S16}})
+ .Div(S64, {{VgprB64}, {VgprB64, VgprB64}}, HasVecMulU64)
+ .Div(S64, {{VgprB64}, {VgprB64, VgprB64}, SplitTo32Mul}, !HasVecMulU64);
bool hasMulHi = ST->hasScalarMulHiInsts();
addRulesForGOpcs({G_UMULH, G_SMULH}, Standard)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
index b5fd6683d319b..ce61e3cb22b9e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
@@ -229,6 +229,7 @@ enum LoweringMethodID {
S_Mul64,
S_Mul64Div,
SplitTo32,
+ SplitTo32Mul,
ScalarizeToS16,
SplitTo32Select,
SplitTo32SExtInReg,
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
index 2d5585d12b823..991f11809f346 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul.ll
@@ -783,10 +783,11 @@ define i64 @v_mul_i64(i64 %num, i64 %den) {
; GFX12-NEXT: s_wait_bvhcnt 0x0
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_mul_hi_u32 v4, v0, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v0, v3, v[4:5]
+; GFX12-NEXT: v_mul_lo_u32 v3, v0, v3
+; GFX12-NEXT: v_mul_lo_u32 v1, v1, v2
; GFX12-NEXT: v_mul_lo_u32 v0, v0, v2
-; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v2, v[3:4]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_add3_u32 v1, v3, v1, v4
; GFX12-NEXT: s_setpc_b64 s[30:31]
;
; GFX1250-LABEL: v_mul_i64:
@@ -1530,14 +1531,18 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX7-NEXT: s_cselect_b32 s33, 1, 0
; GFX7-NEXT: v_readfirstlane_b32 s35, v0
; GFX7-NEXT: s_add_u32 s19, s34, s19
-; GFX7-NEXT: v_mov_b32_e32 v0, s14
; GFX7-NEXT: s_addc_u32 s28, s35, s28
-; GFX7-NEXT: v_mul_hi_u32 v0, s16, v0
; GFX7-NEXT: s_cselect_b32 s34, 1, 0
+; GFX7-NEXT: s_cmp_lg_u32 s25, 0
+; GFX7-NEXT: v_mov_b32_e32 v0, s14
+; GFX7-NEXT: s_cselect_b32 s25, 1, 0
; GFX7-NEXT: s_cmp_lg_u32 s26, 0
+; GFX7-NEXT: v_mul_hi_u32 v0, s16, v0
; GFX7-NEXT: s_addc_u32 s19, s25, s19
-; GFX7-NEXT: v_mov_b32_e32 v2, s13
; GFX7-NEXT: s_cselect_b32 s25, 1, 0
+; GFX7-NEXT: s_cmp_lg_u32 s20, 0
+; GFX7-NEXT: v_mov_b32_e32 v2, s13
+; GFX7-NEXT: s_cselect_b32 s20, 1, 0
; GFX7-NEXT: s_cmp_lg_u32 s21, 0
; GFX7-NEXT: v_mul_hi_u32 v6, s1, v2
; GFX7-NEXT: s_addc_u32 s20, s20, 0
@@ -1613,6 +1618,8 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX7-NEXT: s_add_u32 s27, s39, s27
; GFX7-NEXT: s_addc_u32 s25, s40, s25
; GFX7-NEXT: s_cselect_b32 s39, 1, 0
+; GFX7-NEXT: s_cmp_lg_u32 s30, 0
+; GFX7-NEXT: s_cselect_b32 s30, 1, 0
; GFX7-NEXT: s_cmp_lg_u32 s31, 0
; GFX7-NEXT: s_addc_u32 s30, s30, 0
; GFX7-NEXT: s_cmp_lg_u32 s33, 0
@@ -1622,6 +1629,8 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX7-NEXT: s_cmp_lg_u32 s21, 0
; GFX7-NEXT: s_addc_u32 s21, s30, s27
; GFX7-NEXT: s_cselect_b32 s27, 1, 0
+; GFX7-NEXT: s_cmp_lg_u32 s22, 0
+; GFX7-NEXT: s_cselect_b32 s22, 1, 0
; GFX7-NEXT: s_cmp_lg_u32 s23, 0
; GFX7-NEXT: s_addc_u32 s22, s22, 0
; GFX7-NEXT: s_cmp_lg_u32 s24, 0
@@ -1751,14 +1760,18 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX8-NEXT: s_cselect_b32 s33, 1, 0
; GFX8-NEXT: v_readfirstlane_b32 s35, v0
; GFX8-NEXT: s_add_u32 s19, s34, s19
-; GFX8-NEXT: v_mov_b32_e32 v0, s14
; GFX8-NEXT: s_addc_u32 s28, s35, s28
-; GFX8-NEXT: v_mul_hi_u32 v0, s16, v0
; GFX8-NEXT: s_cselect_b32 s34, 1, 0
+; GFX8-NEXT: s_cmp_lg_u32 s25, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, s14
+; GFX8-NEXT: s_cselect_b32 s25, 1, 0
; GFX8-NEXT: s_cmp_lg_u32 s26, 0
+; GFX8-NEXT: v_mul_hi_u32 v0, s16, v0
; GFX8-NEXT: s_addc_u32 s19, s25, s19
-; GFX8-NEXT: v_mov_b32_e32 v2, s13
; GFX8-NEXT: s_cselect_b32 s25, 1, 0
+; GFX8-NEXT: s_cmp_lg_u32 s20, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s13
+; GFX8-NEXT: s_cselect_b32 s20, 1, 0
; GFX8-NEXT: s_cmp_lg_u32 s21, 0
; GFX8-NEXT: v_mul_hi_u32 v6, s1, v2
; GFX8-NEXT: s_addc_u32 s20, s20, 0
@@ -1834,6 +1847,8 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX8-NEXT: s_add_u32 s27, s39, s27
; GFX8-NEXT: s_addc_u32 s25, s40, s25
; GFX8-NEXT: s_cselect_b32 s39, 1, 0
+; GFX8-NEXT: s_cmp_lg_u32 s30, 0
+; GFX8-NEXT: s_cselect_b32 s30, 1, 0
; GFX8-NEXT: s_cmp_lg_u32 s31, 0
; GFX8-NEXT: s_addc_u32 s30, s30, 0
; GFX8-NEXT: s_cmp_lg_u32 s33, 0
@@ -1843,6 +1858,8 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX8-NEXT: s_cmp_lg_u32 s21, 0
; GFX8-NEXT: s_addc_u32 s21, s30, s27
; GFX8-NEXT: s_cselect_b32 s27, 1, 0
+; GFX8-NEXT: s_cmp_lg_u32 s22, 0
+; GFX8-NEXT: s_cselect_b32 s22, 1, 0
; GFX8-NEXT: s_cmp_lg_u32 s23, 0
; GFX8-NEXT: s_addc_u32 s22, s22, 0
; GFX8-NEXT: s_cmp_lg_u32 s24, 0
@@ -1950,9 +1967,13 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX9-NEXT: s_add_u32 s19, s34, s19
; GFX9-NEXT: s_addc_u32 s24, s35, s24
; GFX9-NEXT: s_cselect_b32 s34, 1, 0
+; GFX9-NEXT: s_cmp_lg_u32 s22, 0
+; GFX9-NEXT: s_cselect_b32 s22, 1, 0
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
; GFX9-NEXT: s_addc_u32 s19, s22, s19
; GFX9-NEXT: s_cselect_b32 s22, 1, 0
+; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_cselect_b32 s20, 1, 0
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
; GFX9-NEXT: s_addc_u32 s20, s20, 0
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
@@ -2014,6 +2035,8 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX9-NEXT: s_add_u32 s24, s39, s24
; GFX9-NEXT: s_addc_u32 s22, s40, s22
; GFX9-NEXT: s_cselect_b32 s39, 1, 0
+; GFX9-NEXT: s_cmp_lg_u32 s30, 0
+; GFX9-NEXT: s_cselect_b32 s30, 1, 0
; GFX9-NEXT: s_cmp_lg_u32 s31, 0
; GFX9-NEXT: s_addc_u32 s30, s30, 0
; GFX9-NEXT: s_cmp_lg_u32 s33, 0
@@ -2023,6 +2046,8 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
; GFX9-NEXT: s_addc_u32 s21, s30, s24
; GFX9-NEXT: s_cselect_b32 s24, 1, 0
+; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_cselect_b32 s26, 1, 0
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
; GFX9-NEXT: s_addc_u32 s26, s26, 0
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
@@ -2129,12 +2154,18 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX10PLUS-NEXT: s_add_u32 s18, s33, s18
; GFX10PLUS-NEXT: s_addc_u32 s23, s34, s23
; GFX10PLUS-NEXT: s_cselect_b32 s33, 1, 0
+; GFX10PLUS-NEXT: s_cmp_lg_u32 s21, 0
+; GFX10PLUS-NEXT: s_mul_hi_u32 s34, s1, s13
+; GFX10PLUS-NEXT: s_cselect_b32 s21, 1, 0
; GFX10PLUS-NEXT: s_cmp_lg_u32 s22, 0
; GFX10PLUS-NEXT: s_mul_hi_u32 s22, s0, s14
; GFX10PLUS-NEXT: s_addc_u32 s18, s21, s18
; GFX10PLUS-NEXT: s_cselect_b32 s21, 1, 0
+; GFX10PLUS-NEXT: s_cmp_lg_u32 s19, 0
+; GFX10PLUS-NEXT: s_mul_hi_u32 s35, s1, s12
+; GFX10PLUS-NEXT: s_cselect_b32 s19, 1, 0
; GFX10PLUS-NEXT: s_cmp_lg_u32 s20, 0
-; GFX10PLUS-NEXT: s_mul_hi_u32 s34, s1, s13
+; GFX10PLUS-NEXT: s_mul_hi_u32 s36, s2, s11
; GFX10PLUS-NEXT: s_addc_u32 s19, s19, 0
; GFX10PLUS-NEXT: s_cmp_lg_u32 s21, 0
; GFX10PLUS-NEXT: s_mul_i32 s21, s0, s14
@@ -2168,12 +2199,10 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX10PLUS-NEXT: s_add_u32 s23, s23, s24
; GFX10PLUS-NEXT: s_addc_u32 s21, s34, s21
; GFX10PLUS-NEXT: s_mul_i32 s34, s1, s12
-; GFX10PLUS-NEXT: s_mul_hi_u32 s35, s1, s12
; GFX10PLUS-NEXT: s_cselect_b32 s24, 1, 0
; GFX10PLUS-NEXT: s_add_u32 s23, s34, s23
; GFX10PLUS-NEXT: s_addc_u32 s21, s35, s21
; GFX10PLUS-NEXT: s_mul_i32 s35, s2, s11
-; GFX10PLUS-NEXT: s_mul_hi_u32 s36, s2, s11
; GFX10PLUS-NEXT: s_cselect_b32 s34, 1, 0
; GFX10PLUS-NEXT: s_add_u32 s23, s35, s23
; GFX10PLUS-NEXT: s_addc_u32 s21, s36, s21
@@ -2193,34 +2222,38 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX10PLUS-NEXT: s_add_u32 s23, s38, s23
; GFX10PLUS-NEXT: s_addc_u32 s21, s39, s21
; GFX10PLUS-NEXT: s_cselect_b32 s38, 1, 0
-; GFX10PLUS-NEXT: s_cmp_lg_u32 s30, 0
+; GFX10PLUS-NEXT: s_cmp_lg_u32 s29, 0
; GFX10PLUS-NEXT: s_mul_i32 s1, s1, s14
+; GFX10PLUS-NEXT: s_cselect_b32 s29, 1, 0
+; GFX10PLUS-NEXT: s_cmp_lg_u32 s30, 0
+; GFX10PLUS-NEXT: s_mul_i32 s2, s2, s13
; GFX10PLUS-NEXT: s_addc_u32 s29, s29, 0
; GFX10PLUS-NEXT: s_cmp_lg_u32 s31, 0
-; GFX10PLUS-NEXT: s_mul_i32 s2, s2, s13
+; GFX10PLUS-NEXT: s_mul_i32 s3, s3, s12
; GFX10PLUS-NEXT: s_addc_u32 s29, s29, 0
; GFX10PLUS-NEXT: s_cmp_lg_u32 s33, 0
-; GFX10PLUS-NEXT: s_mul_i32 s3, s3, s12
+; GFX10PLUS-NEXT: s_mul_i32 s4, s4, s11
; GFX10PLUS-NEXT: s_addc_u32 s29, s29, 0
; GFX10PLUS-NEXT: s_cmp_lg_u32 s20, 0
-; GFX10PLUS-NEXT: s_mul_i32 s4, s4, s11
+; GFX10PLUS-NEXT: s_mul_i32 s5, s5, s10
; GFX10PLUS-NEXT: s_addc_u32 s20, s29, s23
; GFX10PLUS-NEXT: s_cselect_b32 s23, 1, 0
+; GFX10PLUS-NEXT: s_cmp_lg_u32 s25, 0
+; GFX10PLUS-NEXT: s_mul_i32 s6, s6, s9
+; GFX10PLUS-NEXT: s_cselect_b32 s25, 1, 0
; GFX10PLUS-NEXT: s_cmp_lg_u32 s26, 0
; GFX10PLUS-NEXT: s_mul_i32 s26, s0, s15
; GFX10PLUS-NEXT: s_addc_u32 s25, s25, 0
; GFX10PLUS-NEXT: s_cmp_lg_u32 s27, 0
-; GFX10PLUS-NEXT: s_mul_i32 s5, s5, s10
+; GFX10PLUS-NEXT: s_mul_i32 s7, s7, s8
; GFX10PLUS-NEXT: s_addc_u32 s25, s25, 0
; GFX10PLUS-NEXT: s_cmp_lg_u32 s28, 0
-; GFX10PLUS-NEXT: s_mul_i32 s6, s6, s9
+; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s8
; GFX10PLUS-NEXT: s_addc_u32 s25, s25, 0
; GFX10PLUS-NEXT: s_cmp_lg_u32 s23, 0
-; GFX10PLUS-NEXT: s_mul_i32 s7, s7, s8
; GFX10PLUS-NEXT: s_addc_u32 s15, s25, s21
; GFX10PLUS-NEXT: s_addc_u32 s21, s22, s26
; GFX10PLUS-NEXT: s_cmp_lg_u32 s38, 0
-; GFX10PLUS-NEXT: s_mul_i32 s0, s0, s8
; GFX10PLUS-NEXT: s_addc_u32 s1, s21, s1
; GFX10PLUS-NEXT: s_cmp_lg_u32 s37, 0
; GFX10PLUS-NEXT: s_addc_u32 s1, s1, s2
@@ -2308,12 +2341,18 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX12-NEXT: s_add_co_u32 s18, s33, s18
; GFX12-NEXT: s_add_co_ci_u32 s23, s34, s23
; GFX12-NEXT: s_cselect_b32 s33, 1, 0
+; GFX12-NEXT: s_cmp_lg_u32 s21, 0
+; GFX12-NEXT: s_mul_hi_u32 s34, s1, s13
+; GFX12-NEXT: s_cselect_b32 s21, 1, 0
; GFX12-NEXT: s_cmp_lg_u32 s22, 0
; GFX12-NEXT: s_mul_hi_u32 s22, s0, s14
; GFX12-NEXT: s_add_co_ci_u32 s18, s21, s18
; GFX12-NEXT: s_cselect_b32 s21, 1, 0
+; GFX12-NEXT: s_cmp_lg_u32 s19, 0
+; GFX12-NEXT: s_mul_hi_u32 s35, s1, s12
+; GFX12-NEXT: s_cselect_b32 s19, 1, 0
; GFX12-NEXT: s_cmp_lg_u32 s20, 0
-; GFX12-NEXT: s_mul_hi_u32 s34, s1, s13
+; GFX12-NEXT: s_mul_hi_u32 s36, s2, s11
; GFX12-NEXT: s_add_co_ci_u32 s19, s19, 0
; GFX12-NEXT: s_cmp_lg_u32 s21, 0
; GFX12-NEXT: s_mul_i32 s21, s0, s14
@@ -2347,12 +2386,10 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX12-NEXT: s_add_co_u32 s23, s23, s24
; GFX12-NEXT: s_add_co_ci_u32 s21, s34, s21
; GFX12-NEXT: s_mul_i32 s34, s1, s12
-; GFX12-NEXT: s_mul_hi_u32 s35, s1, s12
; GFX12-NEXT: s_cselect_b32 s24, 1, 0
; GFX12-NEXT: s_add_co_u32 s23, s34, s23
; GFX12-NEXT: s_add_co_ci_u32 s21, s35, s21
; GFX12-NEXT: s_mul_i32 s35, s2, s11
-; GFX12-NEXT: s_mul_hi_u32 s36, s2, s11
; GFX12-NEXT: s_cselect_b32 s34, 1, 0
; GFX12-NEXT: s_add_co_u32 s23, s35, s23
; GFX12-NEXT: s_add_co_ci_u32 s21, s36, s21
@@ -2372,34 +2409,38 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX12-NEXT: s_add_co_u32 s23, s38, s23
; GFX12-NEXT: s_add_co_ci_u32 s21, s39, s21
; GFX12-NEXT: s_cselect_b32 s38, 1, 0
-; GFX12-NEXT: s_cmp_lg_u32 s30, 0
+; GFX12-NEXT: s_cmp_lg_u32 s29, 0
; GFX12-NEXT: s_mul_i32 s1, s1, s14
+; GFX12-NEXT: s_cselect_b32 s29, 1, 0
+; GFX12-NEXT: s_cmp_lg_u32 s30, 0
+; GFX12-NEXT: s_mul_i32 s2, s2, s13
; GFX12-NEXT: s_add_co_ci_u32 s29, s29, 0
; GFX12-NEXT: s_cmp_lg_u32 s31, 0
-; GFX12-NEXT: s_mul_i32 s2, s2, s13
+; GFX12-NEXT: s_mul_i32 s3, s3, s12
; GFX12-NEXT: s_add_co_ci_u32 s29, s29, 0
; GFX12-NEXT: s_cmp_lg_u32 s33, 0
-; GFX12-NEXT: s_mul_i32 s3, s3, s12
+; GFX12-NEXT: s_mul_i32 s4, s4, s11
; GFX12-NEXT: s_add_co_ci_u32 s29, s29, 0
; GFX12-NEXT: s_cmp_lg_u32 s20, 0
-; GFX12-NEXT: s_mul_i32 s4, s4, s11
+; GFX12-NEXT: s_mul_i32 s5, s5, s10
; GFX12-NEXT: s_add_co_ci_u32 s20, s29, s23
; GFX12-NEXT: s_cselect_b32 s23, 1, 0
+; GFX12-NEXT: s_cmp_lg_u32 s25, 0
+; GFX12-NEXT: s_mul_i32 s6, s6, s9
+; GFX12-NEXT: s_cselect_b32 s25, 1, 0
; GFX12-NEXT: s_cmp_lg_u32 s26, 0
; GFX12-NEXT: s_mul_i32 s26, s0, s15
; GFX12-NEXT: s_add_co_ci_u32 s25, s25, 0
; GFX12-NEXT: s_cmp_lg_u32 s27, 0
-; GFX12-NEXT: s_mul_i32 s5, s5, s10
+; GFX12-NEXT: s_mul_i32 s7, s7, s8
; GFX12-NEXT: s_add_co_ci_u32 s25, s25, 0
; GFX12-NEXT: s_cmp_lg_u32 s28, 0
-; GFX12-NEXT: s_mul_i32 s6, s6, s9
+; GFX12-NEXT: s_mul_i32 s0, s0, s8
; GFX12-NEXT: s_add_co_ci_u32 s25, s25, 0
; GFX12-NEXT: s_cmp_lg_u32 s23, 0
-; GFX12-NEXT: s_mul_i32 s7, s7, s8
; GFX12-NEXT: s_add_co_ci_u32 s15, s25, s21
; GFX12-NEXT: s_add_co_ci_u32 s21, s22, s26
; GFX12-NEXT: s_cmp_lg_u32 s38, 0
-; GFX12-NEXT: s_mul_i32 s0, s0, s8
; GFX12-NEXT: s_add_co_ci_u32 s1, s21, s1
; GFX12-NEXT: s_cmp_lg_u32 s37, 0
; GFX12-NEXT: s_add_co_ci_u32 s1, s1, s2
@@ -2488,12 +2529,18 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX1250-NEXT: s_add_co_u32 s18, s33, s18
; GFX1250-NEXT: s_add_co_ci_u32 s23, s34, s23
; GFX1250-NEXT: s_cselect_b32 s33, 1, 0
+; GFX1250-NEXT: s_cmp_lg_u32 s21, 0
+; GFX1250-NEXT: s_mul_hi_u32 s34, s1, s13
+; GFX1250-NEXT: s_cselect_b32 s21, 1, 0
; GFX1250-NEXT: s_cmp_lg_u32 s22, 0
; GFX1250-NEXT: s_mul_hi_u32 s22, s0, s14
; GFX1250-NEXT: s_add_co_ci_u32 s18, s21, s18
; GFX1250-NEXT: s_cselect_b32 s21, 1, 0
+; GFX1250-NEXT: s_cmp_lg_u32 s19, 0
+; GFX1250-NEXT: s_mul_hi_u32 s35, s1, s12
+; GFX1250-NEXT: s_cselect_b32 s19, 1, 0
; GFX1250-NEXT: s_cmp_lg_u32 s20, 0
-; GFX1250-NEXT: s_mul_hi_u32 s34, s1, s13
+; GFX1250-NEXT: s_mul_hi_u32 s36, s2, s11
; GFX1250-NEXT: s_add_co_ci_u32 s19, s19, 0
; GFX1250-NEXT: s_cmp_lg_u32 s21, 0
; GFX1250-NEXT: s_mul_i32 s21, s0, s14
@@ -2527,12 +2574,10 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX1250-NEXT: s_add_co_u32 s23, s23, s24
; GFX1250-NEXT: s_add_co_ci_u32 s21, s34, s21
; GFX1250-NEXT: s_mul_i32 s34, s1, s12
-; GFX1250-NEXT: s_mul_hi_u32 s35, s1, s12
; GFX1250-NEXT: s_cselect_b32 s24, 1, 0
; GFX1250-NEXT: s_add_co_u32 s23, s34, s23
; GFX1250-NEXT: s_add_co_ci_u32 s21, s35, s21
; GFX1250-NEXT: s_mul_i32 s35, s2, s11
-; GFX1250-NEXT: s_mul_hi_u32 s36, s2, s11
; GFX1250-NEXT: s_cselect_b32 s34, 1, 0
; GFX1250-NEXT: s_add_co_u32 s23, s35, s23
; GFX1250-NEXT: s_add_co_ci_u32 s21, s36, s21
@@ -2552,34 +2597,38 @@ define amdgpu_ps <8 x i32> @s_mul_i256(i256 inreg %num, i256 inreg %den) {
; GFX1250-NEXT: s_add_co_u32 s23, s38, s23
; GFX1250-NEXT: s_add_co_ci_u32 s21, s39, s21
; GFX1250-NEXT: s_cselect_b32 s38, 1, 0
-; GFX1250-NEXT: s_cmp_lg_u32 s30, 0
+; GFX1250-NEXT: s_cmp_lg_u32 s29, 0
; GFX1250-NEXT: s_mul_i32 s1, s1, s14
+; GFX1250-NEXT: s_cselect_b32 s29, 1, 0
+; GFX1250-NEXT: s_cmp_lg_u32 s30, 0
+; GFX1250-NEXT: s_mul_i32 s2, s2, s13
; GFX1250-NEXT: s_add_co_ci_u32 s29, s29, 0
; GFX1250-NEXT: s_cmp_lg_u32 s31, 0
-; GFX1250-NEXT: s_mul_i32 s2, s2, s13
+; GFX1250-NEXT: s_mul_i32 s3, s3, s12
; GFX1250-NEXT: s_add_co_ci_u32 s29, s29, 0
; GFX1250-NEXT: s_cmp_lg_u32 s33, 0
-; GFX1250-NEXT: s_mul_i32 s3, s3, s12
+; GFX1250-NEXT: s_mul_i32 s4, s4, s11
; GFX1250-NEXT: s_add_co_ci_u32 s29, s29, 0
; GFX1250-NEXT: s_cmp_lg_u32 s20, 0
-; GFX1250-NEXT: s_mul_i32 s4, s4, s11
+; GFX1250-NEXT: s_mul_i32 s5, s5, s10
; GFX1250-NEXT: s_add_co_ci_u32 s20, s29, s23
; GFX1250-NEXT: s_cselect_b32 s23, 1, 0
+; GFX1250-NEXT: s_cmp_lg_u32 s25, 0
+; GFX1250-NEXT: s_mul_i32 s6, s6, s9
+; GFX1250-NEXT: s_cselect_b32 s25, 1, 0
; GFX1250-NEXT: s_cmp_lg_u32 s26, 0
; GFX1250-NEXT: s_mul_i32 s26, s0, s15
; GFX1250-NEXT: s_add_co_ci_u32 s25, s25, 0
; GFX1250-NEXT: s_cmp_lg_u32 s27, 0
-; GFX1250-NEXT: s_mul_i32 s5, s5, s10
+; GFX1250-NEXT: s_mul_i32 s7, s7, s8
; GFX1250-NEXT: s_add_co_ci_u32 s25, s25, 0
; GFX1250-NEXT: s_cmp_lg_u32 s28, 0
-; GFX1250-NEXT: s_mul_i32 s6, s6, s9
+; GFX1250-NEXT: s_mul_i32 s0, s0, s8
; GFX1250-NEXT: s_add_co_ci_u32 s25, s25, 0
; GFX1250-NEXT: s_cmp_lg_u32 s23, 0
-; GFX1250-NEXT: s_mul_i32 s7, s7, s8
; GFX1250-NEXT: s_add_co_ci_u32 s15, s25, s21
; GFX1250-NEXT: s_add_co_ci_u32 s21, s22, s26
; GFX1250-NEXT: s_cmp_lg_u32 s38, 0
-; GFX1250-NEXT: s_mul_i32 s0, s0, s8
; GFX1250-NEXT: s_add_co_ci_u32 s1, s21, s1
; GFX1250-NEXT: s_cmp_lg_u32 s37, 0
; GFX1250-NEXT: s_add_co_ci_u32 s1, s1, s2
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir
index 6b91707328dba..3957f2daa6475 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mul.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-fast | FileCheck %s
+# RUN: llc -mtriple=amdgcn -mcpu=fiji -run-pass='amdgpu-regbankselect,amdgpu-regbanklegalize' %s -o - | FileCheck %s
---
name: mul_s32_ss
@@ -107,13 +107,13 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3
; CHECK-NEXT: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; CHECK-NEXT: [[UV2:%[0-9]+]]:vgpr(s32), [[UV3:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:vgpr(s32) = G_MUL [[UV]], [[UV2]]
; CHECK-NEXT: [[UMULH:%[0-9]+]]:vgpr(s32) = G_UMULH [[UV]], [[UV2]]
- ; CHECK-NEXT: [[MUL:%[0-9]+]]:vgpr(s32) = G_MUL [[UV]], [[UV3]]
- ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[UMULH]], [[MUL]]
- ; CHECK-NEXT: [[MUL1:%[0-9]+]]:vgpr(s32) = G_MUL [[UV1]], [[UV2]]
- ; CHECK-NEXT: [[ADD1:%[0-9]+]]:vgpr(s32) = G_ADD [[ADD]], [[MUL1]]
- ; CHECK-NEXT: [[MUL2:%[0-9]+]]:vgpr(s32) = G_MUL [[UV]], [[UV2]]
- ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[MUL2]](s32), [[ADD1]](s32)
+ ; CHECK-NEXT: [[MUL1:%[0-9]+]]:vgpr(s32) = G_MUL [[UV]], [[UV3]]
+ ; CHECK-NEXT: [[MUL2:%[0-9]+]]:vgpr(s32) = G_MUL [[UV1]], [[UV2]]
+ ; CHECK-NEXT: [[ADD:%[0-9]+]]:vgpr(s32) = G_ADD [[MUL1]], [[MUL2]]
+ ; CHECK-NEXT: [[ADD1:%[0-9]+]]:vgpr(s32) = G_ADD [[ADD]], [[UMULH]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[MUL]](s32), [[ADD1]](s32)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_MUL %0, %1
@@ -129,9 +129,9 @@ body: |
; CHECK-LABEL: name: mul_s64_zext_ss
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr0_sgpr1
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_MUL_U64_:%[0-9]+]]:sgpr_64(s64) = S_MUL_U64 [[COPY]](s64), [[COPY1]](s64)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr2_sgpr3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:sgpr(s64) = G_MUL [[COPY]], [[COPY1]]
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = COPY $sgpr2_sgpr3
%2:_(s64) = G_AMDGPU_S_MUL_U64_U32 %0, %1
@@ -149,10 +149,10 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY1]](s64)
- ; CHECK-NEXT: [[C:%[0-9]+]]:vreg_64(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:vgpr(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:vreg_64 = G_AMDGPU_MAD_U64_U32 [[TRUNC]](s32), [[TRUNC1]], [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s32) = G_TRUNC [[COPY1]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:vgpr(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:sgpr(s32) = G_AMDGPU_MAD_U64_U32 [[TRUNC]](s32), [[TRUNC1]], [[C]]
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_AMDGPU_S_MUL_U64_U32 %0, %1
@@ -168,9 +168,9 @@ body: |
; CHECK-LABEL: name: mul_s64_sext_ss
; CHECK: liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr0_sgpr1
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64(s64) = COPY $sgpr2_sgpr3
- ; CHECK-NEXT: [[S_MUL_U64_:%[0-9]+]]:sgpr_64(s64) = S_MUL_U64 [[COPY]](s64), [[COPY1]](s64)
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr(s64) = COPY $sgpr2_sgpr3
+ ; CHECK-NEXT: [[MUL:%[0-9]+]]:sgpr(s64) = G_MUL [[COPY]], [[COPY1]]
%0:_(s64) = COPY $sgpr0_sgpr1
%1:_(s64) = COPY $sgpr2_sgpr3
%2:_(s64) = G_AMDGPU_S_MUL_I64_I32 %0, %1
@@ -188,10 +188,10 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr(s64) = COPY $vgpr2_vgpr3
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr_32(s32) = G_TRUNC [[COPY1]](s64)
- ; CHECK-NEXT: [[C:%[0-9]+]]:vreg_64(s64) = G_CONSTANT i64 0
- ; CHECK-NEXT: [[AMDGPU_MAD_I64_I32_:%[0-9]+]]:vgpr(s64), [[AMDGPU_MAD_I64_I32_1:%[0-9]+]]:vreg_64 = G_AMDGPU_MAD_I64_I32 [[TRUNC]](s32), [[TRUNC1]], [[C]]
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:vgpr(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:vgpr(s32) = G_TRUNC [[COPY1]](s64)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:vgpr(s64) = G_CONSTANT i64 0
+ ; CHECK-NEXT: [[AMDGPU_MAD_I64_I32_:%[0-9]+]]:vgpr(s64), [[AMDGPU_MAD_I64_I32_1:%[0-9]+]]:sgpr(s32) = G_AMDGPU_MAD_I64_I32 [[TRUNC]](s32), [[TRUNC1]], [[C]]
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_AMDGPU_S_MUL_I64_I32 %0, %1
diff --git a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
index 69bd0687b71af..91ca4f8c5c81a 100644
--- a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
@@ -1,34 +1,34 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx600 < %s | FileCheck -check-prefixes=GFX67,GFX6,GFX67-SDAG,GFX6-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx600 < %s | FileCheck -check-prefixes=GFX67,GFX6,GFX67-GISEL,GFX6-GISEL %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx600 < %s | FileCheck -check-prefixes=GFX67,GFX6,GFX67-GISEL,GFX6-GISEL %s
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX67,GFX7,GFX67-SDAG,GFX7-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX67,GFX7,GFX67-GISEL,GFX7-GISEL %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX67,GFX7,GFX67-GISEL,GFX7-GISEL %s
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx803 < %s | FileCheck -check-prefixes=GFX8,GFX8-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx803 < %s | FileCheck -check-prefixes=GFX8,GFX8-GISEL %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx803 < %s | FileCheck -check-prefixes=GFX8,GFX8-GISEL %s
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG,GFX900-SDAG,GFX900 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX9-GISEL,GFX900-GISEL,GFX900 %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9,GFX9-GISEL,GFX900-GISEL,GFX900 %s
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX9,GFX90A,GFX9-SDAG,GFX90A-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX9,GFX90A,GFX9-GISEL,GFX90A-GISEL %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx90a < %s | FileCheck -check-prefixes=GFX9,GFX90A,GFX9-GISEL,GFX90A-GISEL %s
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GFX10,GFX10-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GFX10,GFX10-GISEL %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GFX10,GFX10-GISEL %s
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG,GFX11-SDAG-TRUE16 %s
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG,GFX11-SDAG-FAKE16 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-SDAG,GFX1200-SDAG-TRUE16 %s
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-SDAG,GFX1200-SDAG-FAKE16 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-GISEL,GFX1200-GISEL-TRUE16 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-GISEL,GFX1200-GISEL-FAKE16 %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-GISEL,GFX1200-GISEL-TRUE16 %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1200,GFX1200-GISEL,GFX1200-GISEL-FAKE16 %s
; RUN: llc -global-isel=0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn-amd-amdpal -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-GISEL %s
; Test for integer mad formation for patterns used in clpeak
@@ -6117,41 +6117,46 @@ define i64 @clpeak_imad_pat_i64(i64 %x, i64 %y) {
; GFX1200-GISEL-NEXT: s_wait_samplecnt 0x0
; GFX1200-GISEL-NEXT: s_wait_bvhcnt 0x0
; GFX1200-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1200-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v0, 1
+; GFX1200-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 1
; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v1, vcc_lo
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v4, v2
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v6, v4, v2
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[0:1], null, v4, v3, v[0:1]
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1200-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v6, v4
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v7, v4, v2
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[0:1], null, v5, v2, v[0:1]
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v1, v4, v2
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v4, v0, v2
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v0, v3
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v6, v1, v2
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v7, v0, v2
+; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1200-GISEL-NEXT: v_add3_u32 v4, v5, v6, v4
+; GFX1200-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v7, v0
; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, v0, v5, vcc_lo
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_3)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v4, v3, v[1:2]
-; GFX1200-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v6, 1
+; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v4, v1, vcc_lo
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v5, v0, v2
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v3, v0, v3
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX1200-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v7, 1
; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v6, null, 0, v0, vcc_lo
-; GFX1200-GISEL-NEXT: v_add_co_u32 v8, vcc_lo, v7, 1
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v7, v4
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v5, v2, v[3:4]
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v7, v4
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v7, v6, v[0:1]
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v4, null, 0, v4, vcc_lo
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v4, v0, v4
+; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1200-GISEL-NEXT: v_add3_u32 v1, v3, v1, v5
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v3, v0, v2
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v1, v2
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v2, v0, v2
+; GFX1200-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 1
; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v1, vcc_lo
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v5, v8
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v4, v[2:3]
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v5, v3, v[0:1]
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v5, v8
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1200-GISEL-NEXT: v_add3_u32 v3, v4, v5, v3
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v4, v2, v0
+; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v1, v2, v1
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v3, v3, v0
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v2, v0
; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v8, v[2:3]
+; GFX1200-GISEL-NEXT: v_add3_u32 v1, v1, v3, v4
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX1250-SDAG-LABEL: clpeak_imad_pat_i64:
@@ -7006,73 +7011,73 @@ define <2 x i64> @clpeak_imad_pat_v2i64(<2 x i64> %x, <2 x i64> %y) {
; GFX1200-GISEL-NEXT: s_wait_samplecnt 0x0
; GFX1200-GISEL-NEXT: s_wait_bvhcnt 0x0
; GFX1200-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1200-GISEL-NEXT: v_add_co_u32 v8, vcc_lo, v0, 1
-; GFX1200-GISEL-NEXT: v_add_co_u32 v9, s0, v2, 1
+; GFX1200-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v0, 1
; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v10, null, 0, v1, vcc_lo
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v8, v4
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v1, v9, v6
-; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_sdst(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v11, null, 0, v3, s0
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v12, v8, v4
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v13, v9, v6
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v8, v5, v[0:1]
-; GFX1200-GISEL-NEXT: v_add_co_u32 v14, vcc_lo, v12, v8
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1200-GISEL-NEXT: v_add_co_u32 v15, s0, v13, v9
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[0:1], null, v9, v7, v[1:2]
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v10, v4, v[2:3]
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v11, v6, v[0:1]
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v14, v4
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v3, v15, v6
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1200-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, 1
; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v16, null, v1, v10, vcc_lo
-; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_sdst(0)
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v8, v0, v4
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v9, v0, v5
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v10, v1, v4
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v11, v2, v6
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v12, v2, v7
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v13, v3, v6
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v14, v0, v4
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v15, v2, v6
+; GFX1200-GISEL-NEXT: v_add3_u32 v8, v9, v10, v8
; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v11, null, v2, v11, s0
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[8:9], null, v14, v5, v[0:1]
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[9:10], null, v15, v7, v[3:4]
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v10, v15, v6
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v7, v14, v4
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v16, v4, v[8:9]
-; GFX1200-GISEL-NEXT: v_add_co_u32 v8, vcc_lo, v12, 1
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[4:5], null, v11, v6, v[9:10]
-; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v1, vcc_lo
-; GFX1200-GISEL-NEXT: v_add_co_u32 v9, vcc_lo, v13, 1
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v7, v8
+; GFX1200-GISEL-NEXT: v_add3_u32 v9, v12, v13, v11
+; GFX1200-GISEL-NEXT: v_add_co_u32 v0, vcc_lo, v14, v0
; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v2, null, 0, v2, vcc_lo
; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v1, v10, v9
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v15, v10, v9
-; GFX1200-GISEL-NEXT: v_add_co_u32 v12, vcc_lo, v7, 1
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, v8, v1, vcc_lo
+; GFX1200-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v15, v2
; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v13, null, 0, v3, vcc_lo
-; GFX1200-GISEL-NEXT: v_add_co_u32 v14, vcc_lo, v10, 1
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v11, v7, v8
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[5:6], null, v7, v5, v[0:1]
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v10, v2, v[1:2]
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v2, v15, v14
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, v9, v3, vcc_lo
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v10, v0, v4
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v0, v5
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v1, v1, v4
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v11, v2, v6
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v7, v2, v7
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v3, v3, v6
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX1200-GISEL-NEXT: v_add_co_u32 v4, vcc_lo, v14, 1
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX1200-GISEL-NEXT: v_add3_u32 v1, v5, v1, v10
+; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v5, null, 0, v8, vcc_lo
+; GFX1200-GISEL-NEXT: v_add_co_u32 v6, vcc_lo, v15, 1
+; GFX1200-GISEL-NEXT: v_add3_u32 v3, v7, v3, v11
; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
-; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v10, null, 0, v4, vcc_lo
-; GFX1200-GISEL-NEXT: v_mul_hi_u32 v0, v11, v12
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[5:6], null, v3, v8, v[5:6]
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v4, v9, v[1:2]
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[6:7], null, v11, v13, v[0:1]
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v11, v12
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[7:8], null, v15, v10, v[2:3]
-; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v5, v12, v[6:7]
-; GFX1200-GISEL-NEXT: v_mul_lo_u32 v2, v15, v14
-; GFX1200-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v3, v14, v[7:8]
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v7, null, 0, v9, vcc_lo
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v8, v0, v4
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v0, v5
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v9, v1, v4
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v11, v2, v6
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v7, v2, v7
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v12, v3, v6
+; GFX1200-GISEL-NEXT: v_add_co_u32 v10, vcc_lo, v0, 1
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX1200-GISEL-NEXT: v_add3_u32 v4, v5, v9, v8
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v5, v2, v6
+; GFX1200-GISEL-NEXT: v_add_co_u32 v2, vcc_lo, v2, 1
+; GFX1200-GISEL-NEXT: s_wait_alu depctr_va_vcc(0)
+; GFX1200-GISEL-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
+; GFX1200-GISEL-NEXT: v_add3_u32 v6, v7, v12, v11
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v7, v0, v10
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v1, v0, v1
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v4, v4, v10
+; GFX1200-GISEL-NEXT: v_mul_hi_u32 v8, v5, v2
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v3, v5, v3
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v6, v6, v2
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v0, v0, v10
+; GFX1200-GISEL-NEXT: v_mul_lo_u32 v2, v5, v2
+; GFX1200-GISEL-NEXT: v_add3_u32 v1, v1, v4, v7
+; GFX1200-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX1200-GISEL-NEXT: v_add3_u32 v3, v3, v6, v8
; GFX1200-GISEL-NEXT: s_setpc_b64 s[30:31]
;
; GFX1250-SDAG-LABEL: clpeak_imad_pat_v2i64:
diff --git a/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll b/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll
index 94448411cfd0e..c381d5b88e6ab 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-reduce-mul.ll
@@ -1,21 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX7,GFX7-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX7,GFX7-GISEL %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx700 < %s | FileCheck -check-prefixes=GFX7,GFX7-GISEL %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GFX8,GFX8-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GFX8,GFX8-GISEL %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx801 < %s | FileCheck -check-prefixes=GFX8,GFX8-GISEL %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck -check-prefixes=GFX9,GFX9-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck -check-prefixes=GFX9,GFX9-GISEL %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx942 < %s | FileCheck -check-prefixes=GFX9,GFX9-GISEL %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX10-SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX10-GISEL %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GFX10,GFX10-GISEL %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG,GFX11-SDAG-TRUE16 %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-SDAG,GFX11-SDAG-FAKE16 %s
; FIXME-TRUE16. enable gisel
-; XUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s
+; XUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-TRUE16 %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-GISEL,GFX11-GISEL-FAKE16 %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG,GFX12-SDAG-TRUE16 %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-SDAG,GFX12-SDAG-FAKE16 %s
-; XUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-TRUE16 %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-FAKE16 %s
+; XUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-TRUE16 %s
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-FAKE16 %s
define i8 @test_vector_reduce_mul_v2i8(<2 x i8> %v) {
; GFX7-SDAG-LABEL: test_vector_reduce_mul_v2i8:
@@ -993,33 +993,19 @@ define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
; GFX8-NEXT: v_mul_lo_u16_sdwa v0, v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
-; GFX9-SDAG-LABEL: test_vector_reduce_mul_v2i16:
-; GFX9-SDAG: ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SDAG-NEXT: v_alignbit_b32 v1, s0, v0, 16
-; GFX9-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-GISEL-LABEL: test_vector_reduce_mul_v2i16:
-; GFX9-GISEL: ; %bb.0: ; %entry
-; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX10-SDAG-LABEL: test_vector_reduce_mul_v2i16:
-; GFX10-SDAG: ; %bb.0: ; %entry
-; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-SDAG-NEXT: v_alignbit_b32 v1, s4, v0, 16
-; GFX10-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_vector_reduce_mul_v2i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX10-GISEL-LABEL: test_vector_reduce_mul_v2i16:
-; GFX10-GISEL: ; %bb.0: ; %entry
-; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+; GFX10-LABEL: test_vector_reduce_mul_v2i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_alignbit_b32 v1, s4, v0, 16
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_mul_v2i16:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
@@ -1042,7 +1028,7 @@ define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
; GFX11-GISEL-LABEL: test_vector_reduce_mul_v2i16:
; GFX11-GISEL: ; %bb.0: ; %entry
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1080,7 +1066,7 @@ define i16 @test_vector_reduce_mul_v2i16(<2 x i16> %v) {
; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1262,39 +1248,22 @@ define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
; GFX8-GISEL-NEXT: v_mul_lo_u16_e32 v0, v2, v0
; GFX8-GISEL-NEXT: s_setpc_b64 s[30:31]
;
-; GFX9-SDAG-LABEL: test_vector_reduce_mul_v4i16:
-; GFX9-SDAG: ; %bb.0: ; %entry
-; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX9-SDAG-NEXT: s_nop 0
-; GFX9-SDAG-NEXT: v_alignbit_b32 v1, s0, v0, 16
-; GFX9-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX9-SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-GISEL-LABEL: test_vector_reduce_mul_v4i16:
-; GFX9-GISEL: ; %bb.0: ; %entry
-; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX9-GISEL-NEXT: s_nop 0
-; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX10-SDAG-LABEL: test_vector_reduce_mul_v4i16:
-; GFX10-SDAG: ; %bb.0: ; %entry
-; GFX10-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-SDAG-NEXT: v_alignbit_b32 v1, s4, v0, 16
-; GFX10-SDAG-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-SDAG-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_vector_reduce_mul_v4i16:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: v_alignbit_b32 v1, s0, v0, 16
+; GFX9-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX10-GISEL-LABEL: test_vector_reduce_mul_v4i16:
-; GFX10-GISEL: ; %bb.0: ; %entry
-; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
-; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
+; GFX10-LABEL: test_vector_reduce_mul_v4i16:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: v_alignbit_b32 v1, s4, v0, 16
+; GFX10-NEXT: v_pk_mul_lo_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-SDAG-TRUE16-LABEL: test_vector_reduce_mul_v4i16:
; GFX11-SDAG-TRUE16: ; %bb.0: ; %entry
@@ -1322,7 +1291,7 @@ define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
;
@@ -1364,7 +1333,7 @@ define i16 @test_vector_reduce_mul_v4i16(<4 x i16> %v) {
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -1443,7 +1412,7 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
; GFX9-GISEL-NEXT: s_nop 0
; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX9-GISEL-NEXT: s_nop 0
-; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
;
@@ -1463,7 +1432,7 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-GISEL-NEXT: v_alignbit_b32 v1, s4, v0, 16
; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
;
@@ -1500,7 +1469,7 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX11-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1550,7 +1519,7 @@ define i16 @test_vector_reduce_mul_v8i16(<8 x i16> %v) {
; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1670,7 +1639,7 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
; GFX9-GISEL-NEXT: s_nop 0
; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX9-GISEL-NEXT: s_nop 0
-; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
; GFX9-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
;
@@ -1698,7 +1667,7 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v2
; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX10-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX10-GISEL-NEXT: v_alignbit_b32 v1, s4, v0, 16
; GFX10-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX10-GISEL-NEXT: s_setpc_b64 s[30:31]
;
@@ -1750,7 +1719,7 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX11-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX11-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
; GFX11-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX11-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -1815,7 +1784,7 @@ define i16 @test_vector_reduce_mul_v16i16(<16 x i16> %v) {
; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v1, v1, v3
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
-; GFX12-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX12-GISEL-NEXT: v_alignbit_b32 v1, s0, v0, 16
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-GISEL-NEXT: v_pk_mul_lo_u16 v0, v0, v1
; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -2594,10 +2563,11 @@ define i64 @test_vector_reduce_mul_v2i64(<2 x i64> %v) {
; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: v_mul_hi_u32 v4, v0, v2
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v0, v3, v[4:5]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v0, v3
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v2
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v2, v[3:4]
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v3, v1, v4
; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%res = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> %v)
@@ -2767,16 +2737,17 @@ define i64 @test_vector_reduce_mul_v3i64(<3 x i64> %v) {
; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX12-GISEL-NEXT: v_mul_hi_u32 v6, v0, v2
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v8, v0, v2
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v0, v3
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v2
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[6:7], null, v0, v3, v[6:7]
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v0, v8, v4
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v2, v[6:7]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v8, v5, v[0:1]
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v8, v4
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v4, v[2:3]
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v3, v1, v6
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v2, v0, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v0, v5
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v4
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v3, v1, v2
; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%res = call i64 @llvm.vector.reduce.mul.v3i64(<3 x i64> %v)
@@ -2989,22 +2960,24 @@ define i64 @test_vector_reduce_mul_v4i64(<4 x i64> %v) {
; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v8, v2, v6
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v9, v0, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v10, v0, v4
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v11, v2, v6
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v8, v0, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v0, v5
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v9, v2, v6
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v7, v2, v7
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v3, v6
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-GISEL-NEXT: v_add3_u32 v3, v7, v3, v9
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v5, v1, v8
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[7:8], null, v2, v7, v[8:9]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[8:9], null, v0, v5, v[9:10]
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v0, v10, v11
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v3, v6, v[7:8]
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v1, v4, v[8:9]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v10, v2, v[0:1]
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v10, v11
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v3, v11, v[1:2]
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v4, v0, v2
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v0, v3
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v2
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v2
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v3, v1, v4
; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%res = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> %v)
@@ -3389,40 +3362,44 @@ define i64 @test_vector_reduce_mul_v8i64(<8 x i64> %v) {
; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v16, v0, v8
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v17, v6, v14
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v21, v0, v8
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v22, v2, v10
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v23, v6, v14
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[18:19], null, v0, v9, v[16:17]
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v9, v2, v10
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v0, v4, v12
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[15:16], null, v6, v15, v[17:18]
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[16:17], null, v2, v11, v[9:10]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[19:20], null, v4, v13, v[0:1]
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v9, v4, v12
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v0, v22, v23
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[6:7], null, v7, v14, v[15:16]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v3, v10, v[16:17]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v5, v12, v[19:20]
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v4, v21, v9
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v7, v21, v9
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[5:6], null, v22, v6, v[0:1]
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v6, v22, v23
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[0:1], null, v1, v8, v[18:19]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v16, v0, v8
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v17, v0, v8
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v9
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v8
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v8, v2, v10
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v9, v2, v11
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v11, v4, v12
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v13, v4, v13
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v5, v12
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v18, v6, v14
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v15, v6, v15
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v7, v7, v14
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v3, v10
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v4, v4, v12
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v2, v10
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v6, v6, v14
+; GFX12-GISEL-NEXT: v_add3_u32 v5, v13, v5, v11
+; GFX12-GISEL-NEXT: v_add3_u32 v0, v0, v1, v17
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v15, v7, v18
+; GFX12-GISEL-NEXT: v_add3_u32 v3, v9, v3, v8
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v7, v16, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v16, v5
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v8, v2, v6
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v2, v1
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v3, v6
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v4, v16, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v2, v6
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v1, v3, v8
+; GFX12-GISEL-NEXT: v_add3_u32 v0, v5, v0, v7
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v21, v3, v[4:5]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v2, v23, v[5:6]
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v2, v7, v6
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v3, v4, v2
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v4, v1
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v0, v9, v[3:4]
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v7, v6
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v7, v1, v[2:3]
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v3, v6, v[1:2]
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v0, v2
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v4, v2
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v1, v5, v3
; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%res = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> %v)
@@ -4171,75 +4148,90 @@ define i64 @test_vector_reduce_mul_v16i64(<16 x i64> %v) {
; GFX12-GISEL-NEXT: s_wait_samplecnt 0x0
; GFX12-GISEL-NEXT: s_wait_bvhcnt 0x0
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: scratch_load_b32 v39, off, s32
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v31, v0, v16
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v32, v2, v18
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v33, v4, v20
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v34, v6, v22
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v35, v8, v24
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v36, v10, v26
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v37, v12, v28
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v38, v14, v30
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v51, v2, v18
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v53, v6, v22
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v55, v10, v26
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v65, v14, v30
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[48:49], null, v0, v17, v[31:32]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[31:32], null, v2, v19, v[32:33]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[32:33], null, v4, v21, v[33:34]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[33:34], null, v6, v23, v[34:35]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[34:35], null, v8, v25, v[35:36]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[35:36], null, v10, v27, v[36:37]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[36:37], null, v12, v29, v[37:38]
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v50, v0, v16
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v52, v4, v20
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v54, v8, v24
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v64, v12, v28
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v23, v51, v55
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v27, v53, v65
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v6, v53, v65
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[7:8], null, v7, v22, v[33:34]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[16:17], null, v1, v16, v[48:49]
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v21, v50, v54
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v25, v52, v64
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[8:9], null, v9, v24, v[34:35]
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v2, v51, v55
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[17:18], null, v3, v18, v[31:32]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[9:10], null, v11, v26, v[35:36]
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v3, v23, v27
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v4, v52, v64
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[18:19], null, v5, v20, v[32:33]
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v0, v50, v54
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v1, v21, v25
-; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[37:38], null, v14, v39, v[38:39]
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[14:15], null, v15, v30, v[37:38]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[10:11], null, v13, v28, v[36:37]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[11:12], null, v51, v9, v[2:3]
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v13, v21, v25
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[5:6], null, v53, v14, v[6:7]
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[9:10], null, v52, v10, v[4:5]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[4:5], null, v7, v65, v[5:6]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[5:6], null, v50, v8, v[0:1]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[6:7], null, v17, v55, v[11:12]
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[7:8], null, v18, v64, v[9:10]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[2:3], null, v23, v4, v[3:4]
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v8, v23, v27
+; GFX12-GISEL-NEXT: scratch_load_b32 v31, off, s32
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v32, v0, v16
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v33, v0, v16
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v17
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v16
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v16, v2, v18
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v17, v2, v18
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v2, v19
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v3, v18
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v18, v4, v20
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v19, v4, v20
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v4, v4, v21
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v5, v20
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v20, v6, v22
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v21, v6, v22
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v6, v6, v23
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v7, v7, v22
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v22, v8, v24
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v23, v8, v24
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v8, v8, v25
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v9, v9, v24
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v24, v10, v26
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v25, v10, v26
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v10, v10, v27
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v11, v11, v26
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v26, v12, v28
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v27, v12, v28
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v12, v12, v29
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v13, v13, v28
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v29, v14, v30
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v15, v15, v30
+; GFX12-GISEL-NEXT: v_add3_u32 v0, v0, v1, v33
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v2, v3, v17
+; GFX12-GISEL-NEXT: v_add3_u32 v2, v4, v5, v19
+; GFX12-GISEL-NEXT: v_add3_u32 v4, v8, v9, v23
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v28, v14, v30
+; GFX12-GISEL-NEXT: v_add3_u32 v3, v6, v7, v21
+; GFX12-GISEL-NEXT: v_add3_u32 v5, v10, v11, v25
+; GFX12-GISEL-NEXT: v_add3_u32 v6, v12, v13, v27
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v8, v32, v22
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v11, v16, v24
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v13, v18, v26
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v4, v32, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v16, v5
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v24
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v6, v18, v6
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v2, v26
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v3, v28
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v22
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v7, v32, v22
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v10, v16, v24
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v12, v18, v26
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v5, v1, v11
+; GFX12-GISEL-NEXT: v_add3_u32 v2, v6, v2, v13
+; GFX12-GISEL-NEXT: v_add3_u32 v0, v4, v0, v8
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v16, v54, v[5:6]
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[0:1], null, v21, v7, v[1:2]
-; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v6, v27, v[2:3]
-; GFX12-GISEL-NEXT: v_mul_hi_u32 v2, v13, v8
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v16, v7, v12
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v2, v7, v2
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v0, v12
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v5, v7, v12
+; GFX12-GISEL-NEXT: v_add3_u32 v0, v2, v0, v16
+; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v9, v14, v31
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v14, v20, v28
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[3:4], null, v3, v25, v[0:1]
-; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v13, v8
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v13, v1, v[2:3]
+; GFX12-GISEL-NEXT: v_add3_u32 v9, v9, v15, v29
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v15, v20, v28
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v9, v20, v9
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v4, v10, v15
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v1, v15
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v6, v10, v15
+; GFX12-GISEL-NEXT: v_add3_u32 v3, v9, v3, v14
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v10, v3
+; GFX12-GISEL-NEXT: v_mul_hi_u32 v2, v5, v6
+; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v3, v1, v4
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v3, v0, v6
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v0, v5, v6
+; GFX12-GISEL-NEXT: v_mul_lo_u32 v1, v5, v1
; GFX12-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-GISEL-NEXT: v_mad_co_u64_u32 v[1:2], null, v3, v8, v[1:2]
+; GFX12-GISEL-NEXT: v_add3_u32 v1, v1, v3, v2
; GFX12-GISEL-NEXT: s_setpc_b64 s[30:31]
entry:
%res = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> %v)
More information about the llvm-branch-commits
mailing list